2024-11-13 13:33:05,960 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-11-13 13:33:05,972 main DEBUG Took 0.010645 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-13 13:33:05,973 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-13 13:33:05,973 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-13 13:33:05,974 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-13 13:33:05,975 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-13 13:33:05,982 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-13 13:33:05,994 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-13 13:33:05,996 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-13 13:33:05,997 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-13 13:33:05,997 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-13 13:33:05,997 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-13 13:33:05,998 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-13 13:33:05,999 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-13 13:33:05,999 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-13 13:33:06,000 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-13 13:33:06,000 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-13 13:33:06,001 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-13 13:33:06,001 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-13 13:33:06,002 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-13 13:33:06,002 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-13 13:33:06,003 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-13 13:33:06,003 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-13 13:33:06,003 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-13 13:33:06,004 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-13 13:33:06,004 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-13 13:33:06,004 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-13 13:33:06,005 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-13 13:33:06,005 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-13 13:33:06,006 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-13 13:33:06,006 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-13 13:33:06,006 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-13 13:33:06,007 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-13 13:33:06,008 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-13 13:33:06,009 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-13 13:33:06,011 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-13 13:33:06,012 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-11-13 13:33:06,013 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-13 13:33:06,013 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-13 13:33:06,021 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-13 13:33:06,024 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-13 13:33:06,025 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-13 13:33:06,026 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-13 13:33:06,026 main DEBUG createAppenders(={Console})
2024-11-13 13:33:06,027 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized
2024-11-13 13:33:06,027 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-11-13 13:33:06,028 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK.
2024-11-13 13:33:06,028 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-13 13:33:06,028 main DEBUG OutputStream closed
2024-11-13 13:33:06,029 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-13 13:33:06,029 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-13 13:33:06,029 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK
2024-11-13 13:33:06,097 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-13 13:33:06,099 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-13 13:33:06,100 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-13 13:33:06,101 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-13 13:33:06,102 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-13 13:33:06,102 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-13 13:33:06,103 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-13 13:33:06,103 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-13 13:33:06,103 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-13 13:33:06,104 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-13 13:33:06,104 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-13 13:33:06,104 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-13 13:33:06,105 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-13 13:33:06,105 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-13 13:33:06,105 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-13 13:33:06,106 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-13 13:33:06,106 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-13 13:33:06,107 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-13 13:33:06,109 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-13 13:33:06,110 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null
2024-11-13 13:33:06,110 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-13 13:33:06,111 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK.
2024-11-13T13:33:06,323 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42
2024-11-13 13:33:06,326 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-13 13:33:06,326 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-13T13:33:06,335 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins
2024-11-13T13:33:06,371 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=220, ProcessCount=11, AvailableMemoryMB=5043
2024-11-13T13:33:06,375 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-13T13:33:06,395 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/cluster_bc4ed2ba-25d8-59f1-9b5b-16401e7ae59d, deleteOnExit=true
2024-11-13T13:33:06,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-13T13:33:06,397 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/test.cache.data in system properties and HBase conf
2024-11-13T13:33:06,398 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/hadoop.tmp.dir in system properties and HBase conf
2024-11-13T13:33:06,399 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/hadoop.log.dir in system properties and HBase conf
2024-11-13T13:33:06,400 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-13T13:33:06,401 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-13T13:33:06,401 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-13T13:33:06,498 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-11-13T13:33:06,584 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-13T13:33:06,588 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-13T13:33:06,588 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-13T13:33:06,589 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-13T13:33:06,589 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-13T13:33:06,590 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-13T13:33:06,590 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-13T13:33:06,591 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-13T13:33:06,592 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-13T13:33:06,592 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-13T13:33:06,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/nfs.dump.dir in system properties and HBase conf
2024-11-13T13:33:06,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/java.io.tmpdir in system properties and HBase conf
2024-11-13T13:33:06,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-13T13:33:06,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-13T13:33:06,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-13T13:33:07,067 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-13T13:33:07,689 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-11-13T13:33:07,756 INFO [Time-limited test {}] log.Log(170): Logging initialized @2443ms to org.eclipse.jetty.util.log.Slf4jLog
2024-11-13T13:33:07,822 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-13T13:33:07,880 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-13T13:33:07,900 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-13T13:33:07,900 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-13T13:33:07,902 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-13T13:33:07,914 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-13T13:33:07,917 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/hadoop.log.dir/,AVAILABLE}
2024-11-13T13:33:07,918 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-13T13:33:08,105 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/java.io.tmpdir/jetty-localhost-41715-hadoop-hdfs-3_4_1-tests_jar-_-any-17954480213700029734/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-13T13:33:08,113 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:41715}
2024-11-13T13:33:08,113 INFO [Time-limited test {}] server.Server(415): Started @2801ms
2024-11-13T13:33:08,147 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-13T13:33:08,869 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-13T13:33:08,879 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-13T13:33:08,880 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-13T13:33:08,881 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-13T13:33:08,881 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-13T13:33:08,884 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/hadoop.log.dir/,AVAILABLE}
2024-11-13T13:33:08,885 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-13T13:33:08,991 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/java.io.tmpdir/jetty-localhost-46851-hadoop-hdfs-3_4_1-tests_jar-_-any-9421335490031639813/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-13T13:33:08,992 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:46851}
2024-11-13T13:33:08,992 INFO [Time-limited test {}] server.Server(415): Started @3680ms
2024-11-13T13:33:09,044 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-13T13:33:09,155 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-13T13:33:09,166 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-13T13:33:09,169 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-13T13:33:09,169 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-13T13:33:09,169 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-13T13:33:09,170 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/hadoop.log.dir/,AVAILABLE}
2024-11-13T13:33:09,171 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-13T13:33:09,283 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/java.io.tmpdir/jetty-localhost-38857-hadoop-hdfs-3_4_1-tests_jar-_-any-12742450113978283987/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-13T13:33:09,284 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:38857}
2024-11-13T13:33:09,284 INFO [Time-limited test {}] server.Server(415): Started @3972ms
2024-11-13T13:33:09,287 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-13T13:33:10,610 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/cluster_bc4ed2ba-25d8-59f1-9b5b-16401e7ae59d/data/data3/current/BP-164363160-172.17.0.2-1731504787152/current, will proceed with Du for space computation calculation,
2024-11-13T13:33:10,610 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/cluster_bc4ed2ba-25d8-59f1-9b5b-16401e7ae59d/data/data4/current/BP-164363160-172.17.0.2-1731504787152/current, will proceed with Du for space computation calculation,
2024-11-13T13:33:10,610 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/cluster_bc4ed2ba-25d8-59f1-9b5b-16401e7ae59d/data/data1/current/BP-164363160-172.17.0.2-1731504787152/current, will proceed with Du for space computation calculation,
2024-11-13T13:33:10,610 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/cluster_bc4ed2ba-25d8-59f1-9b5b-16401e7ae59d/data/data2/current/BP-164363160-172.17.0.2-1731504787152/current, will proceed with Du for space computation calculation,
2024-11-13T13:33:10,643 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-13T13:33:10,644 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-13T13:33:10,694 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x43ac69aa602466a9 with lease ID 0xaa0e4462096ff5b5: Processing first storage report for DS-b4a134dd-46ba-4d6f-89a3-9669c7252571 from datanode DatanodeRegistration(127.0.0.1:35783, datanodeUuid=f3c2b58d-5e24-44a9-a0dc-778fb35f8a5b, infoPort=44455, infoSecurePort=0, ipcPort=36373, storageInfo=lv=-57;cid=testClusterID;nsid=1242109750;c=1731504787152)
2024-11-13T13:33:10,696 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x43ac69aa602466a9 with lease ID 0xaa0e4462096ff5b5: from storage DS-b4a134dd-46ba-4d6f-89a3-9669c7252571 node DatanodeRegistration(127.0.0.1:35783, datanodeUuid=f3c2b58d-5e24-44a9-a0dc-778fb35f8a5b, infoPort=44455, infoSecurePort=0, ipcPort=36373, storageInfo=lv=-57;cid=testClusterID;nsid=1242109750;c=1731504787152), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-13T13:33:10,696 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x80816fa16f2b5fe4 with lease ID 0xaa0e4462096ff5b6: Processing first storage report for DS-1bae7c9b-f15f-412d-a5a8-9fed873b45ef from datanode DatanodeRegistration(127.0.0.1:35441, datanodeUuid=a9c36546-5d6b-40f8-87d9-e6813ef6ccd0, infoPort=45729, infoSecurePort=0, ipcPort=45813, storageInfo=lv=-57;cid=testClusterID;nsid=1242109750;c=1731504787152)
2024-11-13T13:33:10,697 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x80816fa16f2b5fe4 with lease ID 0xaa0e4462096ff5b6: from storage DS-1bae7c9b-f15f-412d-a5a8-9fed873b45ef node DatanodeRegistration(127.0.0.1:35441, datanodeUuid=a9c36546-5d6b-40f8-87d9-e6813ef6ccd0, infoPort=45729, infoSecurePort=0, ipcPort=45813, storageInfo=lv=-57;cid=testClusterID;nsid=1242109750;c=1731504787152), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-13T13:33:10,697 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x43ac69aa602466a9 with lease ID 0xaa0e4462096ff5b5: Processing first storage report for DS-2a80887a-a4db-4dff-a7f6-91c00b875919 from datanode DatanodeRegistration(127.0.0.1:35783, datanodeUuid=f3c2b58d-5e24-44a9-a0dc-778fb35f8a5b, infoPort=44455, infoSecurePort=0, ipcPort=36373, storageInfo=lv=-57;cid=testClusterID;nsid=1242109750;c=1731504787152)
2024-11-13T13:33:10,698 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x43ac69aa602466a9 with lease ID 0xaa0e4462096ff5b5: from storage DS-2a80887a-a4db-4dff-a7f6-91c00b875919 node DatanodeRegistration(127.0.0.1:35783, datanodeUuid=f3c2b58d-5e24-44a9-a0dc-778fb35f8a5b, infoPort=44455, infoSecurePort=0, ipcPort=36373, storageInfo=lv=-57;cid=testClusterID;nsid=1242109750;c=1731504787152), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-13T13:33:10,698 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x80816fa16f2b5fe4 with lease ID 0xaa0e4462096ff5b6: Processing first storage report for DS-cd62315f-b986-428e-a59c-917b0dcb9775 from datanode DatanodeRegistration(127.0.0.1:35441, datanodeUuid=a9c36546-5d6b-40f8-87d9-e6813ef6ccd0, infoPort=45729, infoSecurePort=0, ipcPort=45813, storageInfo=lv=-57;cid=testClusterID;nsid=1242109750;c=1731504787152)
2024-11-13T13:33:10,698 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x80816fa16f2b5fe4 with lease ID 0xaa0e4462096ff5b6: from storage DS-cd62315f-b986-428e-a59c-917b0dcb9775 node DatanodeRegistration(127.0.0.1:35441, datanodeUuid=a9c36546-5d6b-40f8-87d9-e6813ef6ccd0, infoPort=45729, infoSecurePort=0, ipcPort=45813, storageInfo=lv=-57;cid=testClusterID;nsid=1242109750;c=1731504787152), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-13T13:33:10,702 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42
2024-11-13T13:33:10,772 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/cluster_bc4ed2ba-25d8-59f1-9b5b-16401e7ae59d/zookeeper_0, clientPort=49285, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/cluster_bc4ed2ba-25d8-59f1-9b5b-16401e7ae59d/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/cluster_bc4ed2ba-25d8-59f1-9b5b-16401e7ae59d/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-13T13:33:10,783 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49285
2024-11-13T13:33:10,796 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-13T13:33:10,799 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-13T13:33:11,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741825_1001 (size=7)
2024-11-13T13:33:11,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741825_1001 (size=7)
2024-11-13T13:33:11,433 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba with version=8
2024-11-13T13:33:11,433 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/hbase-staging
2024-11-13T13:33:11,508 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-11-13T13:33:11,746 INFO [Time-limited test {}] client.ConnectionUtils(128): master/bfeb2336aed7:0 server-side Connection retries=45
2024-11-13T13:33:11,755 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-13T13:33:11,755 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-13T13:33:11,759 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-13T13:33:11,759 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-13T13:33:11,760 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-13T13:33:11,886 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-11-13T13:33:11,952 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-11-13T13:33:11,963 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-11-13T13:33:11,968 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-13T13:33:11,995 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 97614 (auto-detected)
2024-11-13T13:33:11,996 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-11-13T13:33:12,016 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37965
2024-11-13T13:33:12,038 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37965 connecting to ZooKeeper ensemble=127.0.0.1:49285
2024-11-13T13:33:12,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:379650x0, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-13T13:33:12,189 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37965-0x1013466588e0000 connected
2024-11-13T13:33:12,284 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-13T13:33:12,289 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-13T13:33:12,299 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-13T13:33:12,302 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba, hbase.cluster.distributed=false
2024-11-13T13:33:12,324 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-13T13:33:12,329 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37965
2024-11-13T13:33:12,329 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37965
2024-11-13T13:33:12,330 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37965
2024-11-13T13:33:12,330 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37965
2024-11-13T13:33:12,330 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37965
2024-11-13T13:33:12,421 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/bfeb2336aed7:0 server-side Connection retries=45
2024-11-13T13:33:12,422 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-13T13:33:12,423 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-13T13:33:12,423 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-13T13:33:12,423 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-13T13:33:12,423 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-13T13:33:12,425 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-13T13:33:12,428 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-13T13:33:12,429 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40631
2024-11-13T13:33:12,431 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40631 connecting to ZooKeeper ensemble=127.0.0.1:49285
2024-11-13T13:33:12,432 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-13T13:33:12,436 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-13T13:33:12,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:406310x0, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-13T13:33:12,451 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:406310x0, quorum=127.0.0.1:49285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-13T13:33:12,451 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40631-0x1013466588e0001 connected
2024-11-13T13:33:12,454 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-13T13:33:12,461 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-13T13:33:12,464 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40631-0x1013466588e0001, quorum=127.0.0.1:49285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-13T13:33:12,468 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40631-0x1013466588e0001, quorum=127.0.0.1:49285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-13T13:33:12,471 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40631
2024-11-13T13:33:12,471 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40631
2024-11-13T13:33:12,472 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40631
2024-11-13T13:33:12,472 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40631
2024-11-13T13:33:12,473 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40631
2024-11-13T13:33:12,486 DEBUG [M:0;bfeb2336aed7:37965 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;bfeb2336aed7:37965
2024-11-13T13:33:12,487 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/bfeb2336aed7,37965,1731504791597
2024-11-13T13:33:12,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-13T13:33:12,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40631-0x1013466588e0001, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-13T13:33:12,506 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/bfeb2336aed7,37965,1731504791597
2024-11-13T13:33:12,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-13T13:33:12,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40631-0x1013466588e0001, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-13T13:33:12,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40631-0x1013466588e0001, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-13T13:33:12,535 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-13T13:33:12,536 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/bfeb2336aed7,37965,1731504791597 from backup master directory
2024-11-13T13:33:12,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/bfeb2336aed7,37965,1731504791597
2024-11-13T13:33:12,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40631-0x1013466588e0001, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-13T13:33:12,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-13T13:33:12,545 WARN [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-13T13:33:12,546 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=bfeb2336aed7,37965,1731504791597
2024-11-13T13:33:12,547 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-11-13T13:33:12,549 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-11-13T13:33:12,606 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/hbase.id] with ID: 761b42e6-0976-4c54-8d03-a1b334d6c084
2024-11-13T13:33:12,606 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/.tmp/hbase.id
2024-11-13T13:33:12,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741826_1002 (size=42)
2024-11-13T13:33:12,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741826_1002 (size=42)
2024-11-13T13:33:12,619 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/.tmp/hbase.id]:[hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/hbase.id]
2024-11-13T13:33:12,661 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-13T13:33:12,666 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-13T13:33:12,684 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 15ms.
2024-11-13T13:33:12,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40631-0x1013466588e0001, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-13T13:33:12,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-13T13:33:12,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741827_1003 (size=196)
2024-11-13T13:33:12,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741827_1003 (size=196)
2024-11-13T13:33:12,725 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-13T13:33:12,727 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-13T13:33:12,732 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-13T13:33:12,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741828_1004 (size=1189)
2024-11-13T13:33:12,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741828_1004 (size=1189)
2024-11-13T13:33:12,778 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/MasterData/data/master/store
2024-11-13T13:33:12,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741829_1005 (size=34)
2024-11-13T13:33:12,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741829_1005 (size=34)
2024-11-13T13:33:12,801 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-11-13T13:33:12,805 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-13T13:33:12,807 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-13T13:33:12,807 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-13T13:33:12,807 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-13T13:33:12,809 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-13T13:33:12,809 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-13T13:33:12,810 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-13T13:33:12,811 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731504792806Disabling compacts and flushes for region at 1731504792806Disabling writes for close at 1731504792809 (+3 ms)Writing region close event to WAL at 1731504792809Closed at 1731504792809 2024-11-13T13:33:12,813 WARN [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/MasterData/data/master/store/.initializing 2024-11-13T13:33:12,814 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/MasterData/WALs/bfeb2336aed7,37965,1731504791597 2024-11-13T13:33:12,834 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bfeb2336aed7%2C37965%2C1731504791597, suffix=, logDir=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/MasterData/WALs/bfeb2336aed7,37965,1731504791597, archiveDir=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/MasterData/oldWALs, maxLogs=10 2024-11-13T13:33:12,841 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C37965%2C1731504791597.1731504792838 2024-11-13T13:33:12,859 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/MasterData/WALs/bfeb2336aed7,37965,1731504791597/bfeb2336aed7%2C37965%2C1731504791597.1731504792838 2024-11-13T13:33:12,868 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44455:44455),(127.0.0.1/127.0.0.1:45729:45729)] 2024-11-13T13:33:12,870 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T13:33:12,870 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:33:12,873 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:33:12,874 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:33:12,907 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:33:12,929 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T13:33:12,932 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:33:12,934 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:33:12,934 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:33:12,938 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T13:33:12,938 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:33:12,939 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T13:33:12,939 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:33:12,942 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T13:33:12,942 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:33:12,943 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T13:33:12,943 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:33:12,945 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T13:33:12,946 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:33:12,946 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T13:33:12,947 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:33:12,950 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:33:12,951 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:33:12,956 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:33:12,957 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:33:12,960 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T13:33:12,964 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:33:12,968 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T13:33:12,969 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=830740, jitterRate=0.056341201066970825}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T13:33:12,975 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731504792886Initializing all the Stores at 1731504792888 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504792889 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731504792890 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731504792890Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731504792890Cleaning up temporary data from old regions at 1731504792957 (+67 ms)Region opened successfully at 1731504792975 (+18 ms) 2024-11-13T13:33:12,977 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T13:33:13,006 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15c1f4ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bfeb2336aed7/172.17.0.2:0 2024-11-13T13:33:13,032 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-13T13:33:13,041 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T13:33:13,041 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T13:33:13,044 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T13:33:13,046 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-13T13:33:13,050 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-13T13:33:13,051 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T13:33:13,074 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-13T13:33:13,082 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T13:33:13,134 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T13:33:13,138 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T13:33:13,142 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T13:33:13,155 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T13:33:13,158 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T13:33:13,163 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T13:33:13,176 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T13:33:13,178 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T13:33:13,187 
DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T13:33:13,206 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T13:33:13,218 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T13:33:13,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T13:33:13,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40631-0x1013466588e0001, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T13:33:13,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:33:13,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40631-0x1013466588e0001, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:33:13,233 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=bfeb2336aed7,37965,1731504791597, sessionid=0x1013466588e0000, setting cluster-up flag (Was=false) 2024-11-13T13:33:13,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40631-0x1013466588e0001, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:33:13,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:33:13,292 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T13:33:13,295 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bfeb2336aed7,37965,1731504791597 2024-11-13T13:33:13,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40631-0x1013466588e0001, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:33:13,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:33:13,345 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T13:33:13,349 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bfeb2336aed7,37965,1731504791597 2024-11-13T13:33:13,359 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T13:33:13,377 INFO [RS:0;bfeb2336aed7:40631 {}] regionserver.HRegionServer(746): ClusterId : 761b42e6-0976-4c54-8d03-a1b334d6c084 2024-11-13T13:33:13,380 DEBUG [RS:0;bfeb2336aed7:40631 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T13:33:13,389 DEBUG [RS:0;bfeb2336aed7:40631 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T13:33:13,389 DEBUG [RS:0;bfeb2336aed7:40631 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T13:33:13,399 DEBUG [RS:0;bfeb2336aed7:40631 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T13:33:13,399 DEBUG [RS:0;bfeb2336aed7:40631 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7187cfe4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bfeb2336aed7/172.17.0.2:0 2024-11-13T13:33:13,416 DEBUG [RS:0;bfeb2336aed7:40631 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;bfeb2336aed7:40631 2024-11-13T13:33:13,419 INFO [RS:0;bfeb2336aed7:40631 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T13:33:13,420 INFO [RS:0;bfeb2336aed7:40631 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T13:33:13,420 DEBUG [RS:0;bfeb2336aed7:40631 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-13T13:33:13,422 INFO [RS:0;bfeb2336aed7:40631 {}] regionserver.HRegionServer(2659): reportForDuty to master=bfeb2336aed7,37965,1731504791597 with port=40631, startcode=1731504792390 2024-11-13T13:33:13,424 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T13:33:13,431 DEBUG [RS:0;bfeb2336aed7:40631 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T13:33:13,432 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T13:33:13,437 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
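The StochasticLoadBalancer "Loaded config" entry just above reflects tunables normally supplied through the cluster Configuration. A hedged sketch of setting equivalent values; the property keys are assumed from the reference guide (hbase.regions.slop and the hbase.master.balancer.stochastic.* family) and may differ between HBase versions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Mirrors the values in the "Loaded config" line: slop=0.2,
        // maxSteps=1000000, stepsPerRegion=800, maxRunningTime=30000 ms.
        // Key names are assumptions and should be checked against the version in use.
        conf.setFloat("hbase.regions.slop", 0.2f);
        conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30_000);
        System.out.println("slop=" + conf.getFloat("hbase.regions.slop", 0.0f));
      }
    }
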
2024-11-13T13:33:13,442 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: bfeb2336aed7,37965,1731504791597 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T13:33:13,449 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/bfeb2336aed7:0, corePoolSize=5, maxPoolSize=5 2024-11-13T13:33:13,449 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/bfeb2336aed7:0, corePoolSize=5, maxPoolSize=5 2024-11-13T13:33:13,449 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=5, maxPoolSize=5 2024-11-13T13:33:13,449 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=5, maxPoolSize=5 2024-11-13T13:33:13,449 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/bfeb2336aed7:0, corePoolSize=10, maxPoolSize=10 2024-11-13T13:33:13,449 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:33:13,449 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=2, maxPoolSize=2 2024-11-13T13:33:13,450 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:33:13,451 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731504823451 2024-11-13T13:33:13,452 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T13:33:13,453 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T13:33:13,455 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T13:33:13,455 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T13:33:13,457 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T13:33:13,457 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T13:33:13,457 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T13:33:13,458 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T13:33:13,460 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T13:33:13,462 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:33:13,463 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T13:33:13,464 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T13:33:13,466 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T13:33:13,466 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T13:33:13,472 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T13:33:13,473 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T13:33:13,475 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.large.0-1731504793474,5,FailOnTimeoutGroup] 2024-11-13T13:33:13,476 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.small.0-1731504793475,5,FailOnTimeoutGroup] 2024-11-13T13:33:13,476 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T13:33:13,476 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-13T13:33:13,478 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T13:33:13,478 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-13T13:33:13,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741831_1007 (size=1321) 2024-11-13T13:33:13,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741831_1007 (size=1321) 2024-11-13T13:33:13,482 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T13:33:13,483 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba 2024-11-13T13:33:13,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741832_1008 (size=32) 2024-11-13T13:33:13,494 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741832_1008 (size=32) 2024-11-13T13:33:13,496 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:33:13,497 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49003, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T13:33:13,499 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T13:33:13,502 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T13:33:13,502 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:33:13,503 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:33:13,503 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T13:33:13,504 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37965 {}] master.ServerManager(363): Checking decommissioned status of RegionServer bfeb2336aed7,40631,1731504792390 2024-11-13T13:33:13,506 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37965 {}] master.ServerManager(517): Registering regionserver=bfeb2336aed7,40631,1731504792390 2024-11-13T13:33:13,506 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T13:33:13,506 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:33:13,507 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:33:13,508 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T13:33:13,510 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T13:33:13,511 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:33:13,512 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:33:13,512 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T13:33:13,515 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T13:33:13,515 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:33:13,516 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:33:13,516 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T13:33:13,518 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/hbase/meta/1588230740 2024-11-13T13:33:13,519 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/hbase/meta/1588230740 2024-11-13T13:33:13,520 DEBUG [RS:0;bfeb2336aed7:40631 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba 2024-11-13T13:33:13,520 DEBUG [RS:0;bfeb2336aed7:40631 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38779 2024-11-13T13:33:13,520 DEBUG [RS:0;bfeb2336aed7:40631 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T13:33:13,522 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T13:33:13,523 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T13:33:13,524 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T13:33:13,526 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T13:33:13,530 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T13:33:13,531 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=725462, jitterRate=-0.07752743363380432}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T13:33:13,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T13:33:13,534 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731504793497Initializing all the Stores at 1731504793499 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504793499Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE 
=> '8192 B (8KB)'} at 1731504793499Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731504793499Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504793499Cleaning up temporary data from old regions at 1731504793523 (+24 ms)Region opened successfully at 1731504793534 (+11 ms) 2024-11-13T13:33:13,535 DEBUG [RS:0;bfeb2336aed7:40631 {}] zookeeper.ZKUtil(111): regionserver:40631-0x1013466588e0001, quorum=127.0.0.1:49285, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bfeb2336aed7,40631,1731504792390 2024-11-13T13:33:13,535 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T13:33:13,535 WARN [RS:0;bfeb2336aed7:40631 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T13:33:13,535 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T13:33:13,535 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T13:33:13,535 INFO [RS:0;bfeb2336aed7:40631 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T13:33:13,535 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T13:33:13,535 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T13:33:13,535 DEBUG [RS:0;bfeb2336aed7:40631 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/WALs/bfeb2336aed7,40631,1731504792390 2024-11-13T13:33:13,536 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T13:33:13,537 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bfeb2336aed7,40631,1731504792390] 2024-11-13T13:33:13,537 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731504793535Disabling compacts and flushes for region at 1731504793535Disabling writes for close at 1731504793535Writing region close event to WAL at 1731504793536 (+1 ms)Closed at 1731504793536 2024-11-13T13:33:13,540 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T13:33:13,540 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T13:33:13,547 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 
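Once the meta assignment initialized here completes, the location is published under the baseZNode seen throughout this log (/hbase on quorum 127.0.0.1:49285) at /hbase/meta-region-server. A minimal sketch reading that znode with the plain Apache ZooKeeper client; the payload is a protobuf-encoded meta location record, so only its size is reported, and the quorum port is an ephemeral test port specific to this run.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class MetaLocationZnodeSketch {
      public static void main(String[] args) throws Exception {
        // Quorum address and znode path taken from this log; adjust for any other cluster.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:49285", 30_000, (WatchedEvent e) -> { });
        Stat stat = new Stat();
        // The bytes are a protobuf-serialized meta location record, not plain text.
        byte[] data = zk.getData("/hbase/meta-region-server", false, stat);
        System.out.println("meta-region-server znode: " + data.length + " bytes");
        zk.close();
      }
    }
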
2024-11-13T13:33:13,557 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T13:33:13,560 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T13:33:13,562 INFO [RS:0;bfeb2336aed7:40631 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T13:33:13,576 INFO [RS:0;bfeb2336aed7:40631 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T13:33:13,580 INFO [RS:0;bfeb2336aed7:40631 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T13:33:13,580 INFO [RS:0;bfeb2336aed7:40631 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T13:33:13,580 INFO [RS:0;bfeb2336aed7:40631 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T13:33:13,585 INFO [RS:0;bfeb2336aed7:40631 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T13:33:13,587 INFO [RS:0;bfeb2336aed7:40631 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
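The MemStoreFlusher and compaction-throughput entries above (globalMemStoreLimit=880 M, throughput bounds of 100/50 MB per second) correspond to region server tuning knobs. A sketch of the matching Configuration settings, assuming the property names hbase.regionserver.global.memstore.size and hbase.hstore.compaction.throughput.higher/lower.bound; these names are recalled from the reference guide and should be verified against the running version.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RegionServerTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the JVM heap reserved for memstores; the 880 M figure in
        // the log is this fraction applied to the test JVM's heap.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Compaction throughput bounds matching the 100/50 MB per second
        // figures above; values are bytes per second. Key names are assumptions.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        System.out.println(conf.get("hbase.regionserver.global.memstore.size"));
      }
    }
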
2024-11-13T13:33:13,587 DEBUG [RS:0;bfeb2336aed7:40631 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:33:13,587 DEBUG [RS:0;bfeb2336aed7:40631 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:33:13,587 DEBUG [RS:0;bfeb2336aed7:40631 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:33:13,587 DEBUG [RS:0;bfeb2336aed7:40631 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:33:13,587 DEBUG [RS:0;bfeb2336aed7:40631 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:33:13,588 DEBUG [RS:0;bfeb2336aed7:40631 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bfeb2336aed7:0, corePoolSize=2, maxPoolSize=2 2024-11-13T13:33:13,588 DEBUG [RS:0;bfeb2336aed7:40631 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:33:13,588 DEBUG [RS:0;bfeb2336aed7:40631 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:33:13,588 DEBUG [RS:0;bfeb2336aed7:40631 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:33:13,588 DEBUG [RS:0;bfeb2336aed7:40631 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:33:13,588 DEBUG [RS:0;bfeb2336aed7:40631 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:33:13,588 DEBUG [RS:0;bfeb2336aed7:40631 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:33:13,588 DEBUG [RS:0;bfeb2336aed7:40631 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bfeb2336aed7:0, corePoolSize=3, maxPoolSize=3 2024-11-13T13:33:13,589 DEBUG [RS:0;bfeb2336aed7:40631 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0, corePoolSize=3, maxPoolSize=3 2024-11-13T13:33:13,589 INFO [RS:0;bfeb2336aed7:40631 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T13:33:13,590 INFO [RS:0;bfeb2336aed7:40631 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T13:33:13,590 INFO [RS:0;bfeb2336aed7:40631 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T13:33:13,590 INFO [RS:0;bfeb2336aed7:40631 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-13T13:33:13,590 INFO [RS:0;bfeb2336aed7:40631 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T13:33:13,590 INFO [RS:0;bfeb2336aed7:40631 {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,40631,1731504792390-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T13:33:13,607 INFO [RS:0;bfeb2336aed7:40631 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T13:33:13,609 INFO [RS:0;bfeb2336aed7:40631 {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,40631,1731504792390-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T13:33:13,609 INFO [RS:0;bfeb2336aed7:40631 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:33:13,610 INFO [RS:0;bfeb2336aed7:40631 {}] regionserver.Replication(171): bfeb2336aed7,40631,1731504792390 started 2024-11-13T13:33:13,627 INFO [RS:0;bfeb2336aed7:40631 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:33:13,627 INFO [RS:0;bfeb2336aed7:40631 {}] regionserver.HRegionServer(1482): Serving as bfeb2336aed7,40631,1731504792390, RpcServer on bfeb2336aed7/172.17.0.2:40631, sessionid=0x1013466588e0001 2024-11-13T13:33:13,628 DEBUG [RS:0;bfeb2336aed7:40631 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T13:33:13,628 DEBUG [RS:0;bfeb2336aed7:40631 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bfeb2336aed7,40631,1731504792390 2024-11-13T13:33:13,628 DEBUG [RS:0;bfeb2336aed7:40631 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bfeb2336aed7,40631,1731504792390' 2024-11-13T13:33:13,628 DEBUG [RS:0;bfeb2336aed7:40631 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T13:33:13,630 DEBUG [RS:0;bfeb2336aed7:40631 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T13:33:13,630 DEBUG [RS:0;bfeb2336aed7:40631 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T13:33:13,630 DEBUG [RS:0;bfeb2336aed7:40631 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T13:33:13,631 DEBUG [RS:0;bfeb2336aed7:40631 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager bfeb2336aed7,40631,1731504792390 2024-11-13T13:33:13,631 DEBUG [RS:0;bfeb2336aed7:40631 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bfeb2336aed7,40631,1731504792390' 2024-11-13T13:33:13,631 DEBUG [RS:0;bfeb2336aed7:40631 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T13:33:13,632 DEBUG [RS:0;bfeb2336aed7:40631 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T13:33:13,632 DEBUG [RS:0;bfeb2336aed7:40631 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T13:33:13,632 INFO [RS:0;bfeb2336aed7:40631 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T13:33:13,632 INFO [RS:0;bfeb2336aed7:40631 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-13T13:33:13,711 WARN [bfeb2336aed7:37965 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-13T13:33:13,745 INFO [RS:0;bfeb2336aed7:40631 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bfeb2336aed7%2C40631%2C1731504792390, suffix=, logDir=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/WALs/bfeb2336aed7,40631,1731504792390, archiveDir=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/oldWALs, maxLogs=32 2024-11-13T13:33:13,747 INFO [RS:0;bfeb2336aed7:40631 {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C40631%2C1731504792390.1731504793747 2024-11-13T13:33:13,756 INFO [RS:0;bfeb2336aed7:40631 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/WALs/bfeb2336aed7,40631,1731504792390/bfeb2336aed7%2C40631%2C1731504792390.1731504793747 2024-11-13T13:33:13,758 DEBUG [RS:0;bfeb2336aed7:40631 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45729:45729),(127.0.0.1/127.0.0.1:44455:44455)] 2024-11-13T13:33:13,964 DEBUG [bfeb2336aed7:37965 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-13T13:33:13,978 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=bfeb2336aed7,40631,1731504792390 2024-11-13T13:33:13,985 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bfeb2336aed7,40631,1731504792390, state=OPENING 2024-11-13T13:33:14,165 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T13:33:14,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:33:14,177 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40631-0x1013466588e0001, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:33:14,179 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T13:33:14,179 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T13:33:14,183 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T13:33:14,188 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=bfeb2336aed7,40631,1731504792390}] 2024-11-13T13:33:14,366 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T13:33:14,369 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47513, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T13:33:14,381 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T13:33:14,381 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T13:33:14,385 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bfeb2336aed7%2C40631%2C1731504792390.meta, suffix=.meta, logDir=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/WALs/bfeb2336aed7,40631,1731504792390, archiveDir=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/oldWALs, maxLogs=32 2024-11-13T13:33:14,387 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C40631%2C1731504792390.meta.1731504794387.meta 2024-11-13T13:33:14,394 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/WALs/bfeb2336aed7,40631,1731504792390/bfeb2336aed7%2C40631%2C1731504792390.meta.1731504794387.meta 2024-11-13T13:33:14,396 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45729:45729),(127.0.0.1/127.0.0.1:44455:44455)] 2024-11-13T13:33:14,396 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T13:33:14,398 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T13:33:14,401 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T13:33:14,405 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-13T13:33:14,409 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T13:33:14,409 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:33:14,409 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T13:33:14,409 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T13:33:14,412 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T13:33:14,414 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T13:33:14,414 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:33:14,415 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:33:14,415 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T13:33:14,417 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T13:33:14,417 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:33:14,418 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:33:14,418 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T13:33:14,419 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T13:33:14,419 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:33:14,420 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:33:14,420 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T13:33:14,422 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T13:33:14,422 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:33:14,423 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-13T13:33:14,423 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T13:33:14,424 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/hbase/meta/1588230740 2024-11-13T13:33:14,427 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/hbase/meta/1588230740 2024-11-13T13:33:14,429 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T13:33:14,429 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T13:33:14,430 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T13:33:14,432 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T13:33:14,433 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=832382, jitterRate=0.058428943157196045}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T13:33:14,433 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T13:33:14,435 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731504794410Writing region info on filesystem at 1731504794410Initializing all the Stores at 1731504794412 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504794412Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504794412Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731504794412Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504794412Cleaning up temporary data from old regions at 1731504794429 (+17 ms)Running coprocessor post-open hooks at 1731504794434 (+5 ms)Region opened successfully at 1731504794435 (+1 ms) 2024-11-13T13:33:14,440 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731504794355 2024-11-13T13:33:14,450 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T13:33:14,451 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T13:33:14,452 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=bfeb2336aed7,40631,1731504792390 2024-11-13T13:33:14,454 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bfeb2336aed7,40631,1731504792390, state=OPEN 2024-11-13T13:33:14,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40631-0x1013466588e0001, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T13:33:14,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T13:33:14,534 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T13:33:14,534 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T13:33:14,535 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=bfeb2336aed7,40631,1731504792390 2024-11-13T13:33:14,541 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T13:33:14,542 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=bfeb2336aed7,40631,1731504792390 in 347 msec 2024-11-13T13:33:14,550 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-13T13:33:14,550 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 996 msec 2024-11-13T13:33:14,552 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T13:33:14,552 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T13:33:14,570 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T13:33:14,571 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bfeb2336aed7,40631,1731504792390, seqNum=-1] 2024-11-13T13:33:14,588 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T13:33:14,590 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33233, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T13:33:14,609 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.2230 sec 2024-11-13T13:33:14,609 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731504794609, completionTime=-1 2024-11-13T13:33:14,612 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-13T13:33:14,612 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-13T13:33:14,639 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-13T13:33:14,639 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731504854639 2024-11-13T13:33:14,639 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731504914639 2024-11-13T13:33:14,639 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 27 msec 2024-11-13T13:33:14,642 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,37965,1731504791597-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T13:33:14,642 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,37965,1731504791597-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:33:14,643 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,37965,1731504791597-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:33:14,644 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-bfeb2336aed7:37965, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T13:33:14,644 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-13T13:33:14,645 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T13:33:14,651 DEBUG [master/bfeb2336aed7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T13:33:14,674 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.128sec 2024-11-13T13:33:14,675 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T13:33:14,676 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T13:33:14,678 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-13T13:33:14,679 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-13T13:33:14,679 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T13:33:14,680 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,37965,1731504791597-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T13:33:14,680 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,37965,1731504791597-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T13:33:14,689 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@358a6cb3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T13:33:14,690 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T13:33:14,691 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T13:33:14,691 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,37965,1731504791597-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T13:33:14,691 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-13T13:33:14,691 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-13T13:33:14,694 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request bfeb2336aed7,37965,-1 for getting cluster id 2024-11-13T13:33:14,697 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T13:33:14,704 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '761b42e6-0976-4c54-8d03-a1b334d6c084' 2024-11-13T13:33:14,707 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T13:33:14,707 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "761b42e6-0976-4c54-8d03-a1b334d6c084" 2024-11-13T13:33:14,708 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11c9db71, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T13:33:14,708 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [bfeb2336aed7,37965,-1] 2024-11-13T13:33:14,710 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T13:33:14,712 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:33:14,714 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59364, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T13:33:14,717 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19f64e80, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T13:33:14,718 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T13:33:14,725 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bfeb2336aed7,40631,1731504792390, seqNum=-1] 2024-11-13T13:33:14,726 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T13:33:14,728 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39326, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T13:33:14,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=bfeb2336aed7,37965,1731504791597 2024-11-13T13:33:14,748 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:33:14,755 INFO [Time-limited 
test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-13T13:33:14,759 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-13T13:33:14,764 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is bfeb2336aed7,37965,1731504791597 2024-11-13T13:33:14,767 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@270f05b3 2024-11-13T13:33:14,769 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-13T13:33:14,772 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59380, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-13T13:33:14,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37965 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-13T13:33:14,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37965 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-13T13:33:14,777 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37965 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T13:33:14,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37965 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-13T13:33:14,788 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T13:33:14,790 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37965 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-13T13:33:14,790 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:33:14,793 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T13:33:14,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37965 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T13:33:14,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:35783 is added to blk_1073741835_1011 (size=389) 2024-11-13T13:33:14,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741835_1011 (size=389) 2024-11-13T13:33:14,839 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => eb5ebf9e791c82d5ca29a7ecad3b9cb5, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba 2024-11-13T13:33:14,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741836_1012 (size=72) 2024-11-13T13:33:14,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741836_1012 (size=72) 2024-11-13T13:33:14,853 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:33:14,853 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing eb5ebf9e791c82d5ca29a7ecad3b9cb5, disabling compactions & flushes 2024-11-13T13:33:14,854 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5. 2024-11-13T13:33:14,854 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5. 2024-11-13T13:33:14,854 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5. after waiting 0 ms 2024-11-13T13:33:14,854 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5. 2024-11-13T13:33:14,854 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5. 
2024-11-13T13:33:14,854 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for eb5ebf9e791c82d5ca29a7ecad3b9cb5: Waiting for close lock at 1731504794853Disabling compacts and flushes for region at 1731504794853Disabling writes for close at 1731504794854 (+1 ms)Writing region close event to WAL at 1731504794854Closed at 1731504794854 2024-11-13T13:33:14,856 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T13:33:14,860 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731504794856"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731504794856"}]},"ts":"1731504794856"} 2024-11-13T13:33:14,865 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-13T13:33:14,868 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T13:33:14,870 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731504794868"}]},"ts":"1731504794868"} 2024-11-13T13:33:14,875 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-13T13:33:14,877 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=eb5ebf9e791c82d5ca29a7ecad3b9cb5, ASSIGN}] 2024-11-13T13:33:14,880 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=eb5ebf9e791c82d5ca29a7ecad3b9cb5, ASSIGN 2024-11-13T13:33:14,882 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=eb5ebf9e791c82d5ca29a7ecad3b9cb5, ASSIGN; state=OFFLINE, location=bfeb2336aed7,40631,1731504792390; forceNewPlan=false, retain=false 2024-11-13T13:33:15,033 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=eb5ebf9e791c82d5ca29a7ecad3b9cb5, regionState=OPENING, regionLocation=bfeb2336aed7,40631,1731504792390 2024-11-13T13:33:15,038 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=eb5ebf9e791c82d5ca29a7ecad3b9cb5, ASSIGN because future has completed 2024-11-13T13:33:15,039 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, 
state=RUNNABLE, hasLock=false; OpenRegionProcedure eb5ebf9e791c82d5ca29a7ecad3b9cb5, server=bfeb2336aed7,40631,1731504792390}] 2024-11-13T13:33:15,200 INFO [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5. 2024-11-13T13:33:15,200 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => eb5ebf9e791c82d5ca29a7ecad3b9cb5, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5.', STARTKEY => '', ENDKEY => ''} 2024-11-13T13:33:15,201 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling eb5ebf9e791c82d5ca29a7ecad3b9cb5 2024-11-13T13:33:15,201 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:33:15,201 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for eb5ebf9e791c82d5ca29a7ecad3b9cb5 2024-11-13T13:33:15,201 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for eb5ebf9e791c82d5ca29a7ecad3b9cb5 2024-11-13T13:33:15,206 INFO [StoreOpener-eb5ebf9e791c82d5ca29a7ecad3b9cb5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region eb5ebf9e791c82d5ca29a7ecad3b9cb5 2024-11-13T13:33:15,209 INFO [StoreOpener-eb5ebf9e791c82d5ca29a7ecad3b9cb5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eb5ebf9e791c82d5ca29a7ecad3b9cb5 columnFamilyName info 2024-11-13T13:33:15,209 DEBUG [StoreOpener-eb5ebf9e791c82d5ca29a7ecad3b9cb5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:33:15,211 INFO [StoreOpener-eb5ebf9e791c82d5ca29a7ecad3b9cb5-1 {}] regionserver.HStore(327): Store=eb5ebf9e791c82d5ca29a7ecad3b9cb5/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T13:33:15,211 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegion(1038): replaying wal for eb5ebf9e791c82d5ca29a7ecad3b9cb5 2024-11-13T13:33:15,213 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5 2024-11-13T13:33:15,214 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5 2024-11-13T13:33:15,215 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for eb5ebf9e791c82d5ca29a7ecad3b9cb5 2024-11-13T13:33:15,215 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for eb5ebf9e791c82d5ca29a7ecad3b9cb5 2024-11-13T13:33:15,218 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for eb5ebf9e791c82d5ca29a7ecad3b9cb5 2024-11-13T13:33:15,222 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T13:33:15,223 INFO [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened eb5ebf9e791c82d5ca29a7ecad3b9cb5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=712248, jitterRate=-0.09433001279830933}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T13:33:15,224 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for eb5ebf9e791c82d5ca29a7ecad3b9cb5 2024-11-13T13:33:15,225 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for eb5ebf9e791c82d5ca29a7ecad3b9cb5: Running coprocessor pre-open hook at 1731504795201Writing region info on filesystem at 1731504795201Initializing all the Stores at 1731504795203 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731504795206 (+3 ms)Cleaning up temporary data from old regions at 1731504795215 (+9 ms)Running coprocessor post-open hooks at 1731504795224 (+9 ms)Region opened successfully at 1731504795224 2024-11-13T13:33:15,226 INFO [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5., pid=6, 
masterSystemTime=1731504795193 2024-11-13T13:33:15,230 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5. 2024-11-13T13:33:15,230 INFO [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5. 2024-11-13T13:33:15,231 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=eb5ebf9e791c82d5ca29a7ecad3b9cb5, regionState=OPEN, openSeqNum=2, regionLocation=bfeb2336aed7,40631,1731504792390 2024-11-13T13:33:15,236 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure eb5ebf9e791c82d5ca29a7ecad3b9cb5, server=bfeb2336aed7,40631,1731504792390 because future has completed 2024-11-13T13:33:15,243 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-13T13:33:15,243 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure eb5ebf9e791c82d5ca29a7ecad3b9cb5, server=bfeb2336aed7,40631,1731504792390 in 199 msec 2024-11-13T13:33:15,247 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-13T13:33:15,247 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=eb5ebf9e791c82d5ca29a7ecad3b9cb5, ASSIGN in 366 msec 2024-11-13T13:33:15,248 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T13:33:15,249 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731504795248"}]},"ts":"1731504795248"} 2024-11-13T13:33:15,252 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-13T13:33:15,254 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T13:33:15,257 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 474 msec 2024-11-13T13:33:19,928 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-13T13:33:19,978 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-13T13:33:19,980 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-13T13:33:21,949 DEBUG [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T13:33:21,950 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-13T13:33:21,955 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-13T13:33:21,955 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-13T13:33:21,958 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T13:33:21,958 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-13T13:33:21,959 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-13T13:33:21,959 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-13T13:33:24,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37965 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T13:33:24,898 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-13T13:33:24,902 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-13T13:33:24,908 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-13T13:33:24,909 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5. 
2024-11-13T13:33:24,910 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C40631%2C1731504792390.1731504804910 2024-11-13T13:33:24,919 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:33:24,919 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:33:24,919 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:33:24,919 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:33:24,919 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:33:24,920 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/WALs/bfeb2336aed7,40631,1731504792390/bfeb2336aed7%2C40631%2C1731504792390.1731504793747 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/WALs/bfeb2336aed7,40631,1731504792390/bfeb2336aed7%2C40631%2C1731504792390.1731504804910 2024-11-13T13:33:24,921 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45729:45729),(127.0.0.1/127.0.0.1:44455:44455)] 2024-11-13T13:33:24,921 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/WALs/bfeb2336aed7,40631,1731504792390/bfeb2336aed7%2C40631%2C1731504792390.1731504793747 is not closed yet, will try archiving it next time 2024-11-13T13:33:24,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741833_1009 (size=451) 2024-11-13T13:33:24,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741833_1009 (size=451) 2024-11-13T13:33:24,926 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/WALs/bfeb2336aed7,40631,1731504792390/bfeb2336aed7%2C40631%2C1731504792390.1731504793747 to hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/oldWALs/bfeb2336aed7%2C40631%2C1731504792390.1731504793747 2024-11-13T13:33:24,931 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5., hostname=bfeb2336aed7,40631,1731504792390, seqNum=2] 2024-11-13T13:33:36,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40631 {}] regionserver.HRegion(8855): Flush requested on eb5ebf9e791c82d5ca29a7ecad3b9cb5 2024-11-13T13:33:36,995 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eb5ebf9e791c82d5ca29a7ecad3b9cb5 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T13:33:37,110 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/.tmp/info/a23c44369bff47c78cf152a91e195029 is 1080, key is row0001/info:/1731504804933/Put/seqid=0 2024-11-13T13:33:37,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741838_1014 (size=12509) 2024-11-13T13:33:37,122 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741838_1014 (size=12509) 2024-11-13T13:33:37,123 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/.tmp/info/a23c44369bff47c78cf152a91e195029 2024-11-13T13:33:37,169 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/.tmp/info/a23c44369bff47c78cf152a91e195029 as hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/info/a23c44369bff47c78cf152a91e195029 2024-11-13T13:33:37,180 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/info/a23c44369bff47c78cf152a91e195029, entries=7, sequenceid=11, filesize=12.2 K 2024-11-13T13:33:37,187 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for eb5ebf9e791c82d5ca29a7ecad3b9cb5 in 193ms, sequenceid=11, compaction requested=false 2024-11-13T13:33:37,188 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for eb5ebf9e791c82d5ca29a7ecad3b9cb5: 2024-11-13T13:33:40,698 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-13T13:33:45,007 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C40631%2C1731504792390.1731504825006 2024-11-13T13:33:45,225 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 212 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35441,DS-1bae7c9b-f15f-412d-a5a8-9fed873b45ef,DISK], DatanodeInfoWithStorage[127.0.0.1:35783,DS-b4a134dd-46ba-4d6f-89a3-9669c7252571,DISK]] 2024-11-13T13:33:45,226 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:33:45,226 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:33:45,226 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:33:45,226 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:33:45,226 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:33:45,227 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/WALs/bfeb2336aed7,40631,1731504792390/bfeb2336aed7%2C40631%2C1731504792390.1731504804910 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/WALs/bfeb2336aed7,40631,1731504792390/bfeb2336aed7%2C40631%2C1731504792390.1731504825006 2024-11-13T13:33:45,228 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45729:45729),(127.0.0.1/127.0.0.1:44455:44455)] 2024-11-13T13:33:45,228 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/WALs/bfeb2336aed7,40631,1731504792390/bfeb2336aed7%2C40631%2C1731504792390.1731504804910 is not closed yet, will try archiving it next time 2024-11-13T13:33:45,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741837_1013 (size=12399) 2024-11-13T13:33:45,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741837_1013 (size=12399) 2024-11-13T13:33:45,433 INFO [FSHLog-0-hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba-prefix:bfeb2336aed7,40631,1731504792390 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35441,DS-1bae7c9b-f15f-412d-a5a8-9fed873b45ef,DISK], DatanodeInfoWithStorage[127.0.0.1:35783,DS-b4a134dd-46ba-4d6f-89a3-9669c7252571,DISK]] 2024-11-13T13:33:47,641 INFO [FSHLog-0-hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba-prefix:bfeb2336aed7,40631,1731504792390 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35441,DS-1bae7c9b-f15f-412d-a5a8-9fed873b45ef,DISK], DatanodeInfoWithStorage[127.0.0.1:35783,DS-b4a134dd-46ba-4d6f-89a3-9669c7252571,DISK]] 2024-11-13T13:33:49,847 INFO [FSHLog-0-hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba-prefix:bfeb2336aed7,40631,1731504792390 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35441,DS-1bae7c9b-f15f-412d-a5a8-9fed873b45ef,DISK], DatanodeInfoWithStorage[127.0.0.1:35783,DS-b4a134dd-46ba-4d6f-89a3-9669c7252571,DISK]] 2024-11-13T13:33:52,054 INFO [FSHLog-0-hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba-prefix:bfeb2336aed7,40631,1731504792390 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35441,DS-1bae7c9b-f15f-412d-a5a8-9fed873b45ef,DISK], DatanodeInfoWithStorage[127.0.0.1:35783,DS-b4a134dd-46ba-4d6f-89a3-9669c7252571,DISK]] 2024-11-13T13:33:52,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40631 {}] regionserver.HRegion(8855): Flush requested on eb5ebf9e791c82d5ca29a7ecad3b9cb5 2024-11-13T13:33:52,055 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eb5ebf9e791c82d5ca29a7ecad3b9cb5 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T13:33:52,259 INFO [FSHLog-0-hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba-prefix:bfeb2336aed7,40631,1731504792390 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35441,DS-1bae7c9b-f15f-412d-a5a8-9fed873b45ef,DISK], DatanodeInfoWithStorage[127.0.0.1:35783,DS-b4a134dd-46ba-4d6f-89a3-9669c7252571,DISK]] 2024-11-13T13:33:52,269 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/.tmp/info/b676dd39633d4ec6a2ed5f2963d599b2 is 1080, key is row0008/info:/1731504818994/Put/seqid=0 2024-11-13T13:33:52,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741840_1016 (size=12509) 2024-11-13T13:33:52,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741840_1016 (size=12509) 2024-11-13T13:33:52,277 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/.tmp/info/b676dd39633d4ec6a2ed5f2963d599b2 2024-11-13T13:33:52,290 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/.tmp/info/b676dd39633d4ec6a2ed5f2963d599b2 as hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/info/b676dd39633d4ec6a2ed5f2963d599b2 2024-11-13T13:33:52,301 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/info/b676dd39633d4ec6a2ed5f2963d599b2, entries=7, sequenceid=21, filesize=12.2 K 2024-11-13T13:33:52,503 INFO [FSHLog-0-hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba-prefix:bfeb2336aed7,40631,1731504792390 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35441,DS-1bae7c9b-f15f-412d-a5a8-9fed873b45ef,DISK], DatanodeInfoWithStorage[127.0.0.1:35783,DS-b4a134dd-46ba-4d6f-89a3-9669c7252571,DISK]] 2024-11-13T13:33:52,503 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for eb5ebf9e791c82d5ca29a7ecad3b9cb5 in 
449ms, sequenceid=21, compaction requested=false 2024-11-13T13:33:52,504 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for eb5ebf9e791c82d5ca29a7ecad3b9cb5: 2024-11-13T13:33:52,504 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-13T13:33:52,504 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T13:33:52,505 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/info/a23c44369bff47c78cf152a91e195029 because midkey is the same as first or last row 2024-11-13T13:33:54,261 INFO [FSHLog-0-hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba-prefix:bfeb2336aed7,40631,1731504792390 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35441,DS-1bae7c9b-f15f-412d-a5a8-9fed873b45ef,DISK], DatanodeInfoWithStorage[127.0.0.1:35783,DS-b4a134dd-46ba-4d6f-89a3-9669c7252571,DISK]] 2024-11-13T13:33:54,709 INFO [master/bfeb2336aed7:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-13T13:33:54,709 INFO [master/bfeb2336aed7:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-13T13:33:56,469 INFO [FSHLog-0-hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba-prefix:bfeb2336aed7,40631,1731504792390 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35441,DS-1bae7c9b-f15f-412d-a5a8-9fed873b45ef,DISK], DatanodeInfoWithStorage[127.0.0.1:35783,DS-b4a134dd-46ba-4d6f-89a3-9669c7252571,DISK]] 2024-11-13T13:33:56,473 WARN [FSHLog-0-hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba-prefix:bfeb2336aed7,40631,1731504792390 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35441,DS-1bae7c9b-f15f-412d-a5a8-9fed873b45ef,DISK], DatanodeInfoWithStorage[127.0.0.1:35783,DS-b4a134dd-46ba-4d6f-89a3-9669c7252571,DISK]] 2024-11-13T13:33:56,475 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog bfeb2336aed7%2C40631%2C1731504792390:(num 1731504825006) roll requested 2024-11-13T13:33:56,476 INFO [regionserver/bfeb2336aed7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C40631%2C1731504792390.1731504836476 2024-11-13T13:33:56,686 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35441,DS-1bae7c9b-f15f-412d-a5a8-9fed873b45ef,DISK], DatanodeInfoWithStorage[127.0.0.1:35783,DS-b4a134dd-46ba-4d6f-89a3-9669c7252571,DISK]] 2024-11-13T13:33:56,686 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:33:56,686 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:33:56,686 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:33:56,687 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:33:56,687 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-11-13T13:33:56,687 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/WALs/bfeb2336aed7,40631,1731504792390/bfeb2336aed7%2C40631%2C1731504792390.1731504825006 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/WALs/bfeb2336aed7,40631,1731504792390/bfeb2336aed7%2C40631%2C1731504792390.1731504836476 2024-11-13T13:33:56,689 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45729:45729),(127.0.0.1/127.0.0.1:44455:44455)] 2024-11-13T13:33:56,689 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/WALs/bfeb2336aed7,40631,1731504792390/bfeb2336aed7%2C40631%2C1731504792390.1731504825006 is not closed yet, will try archiving it next time 2024-11-13T13:33:56,689 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/WALs/bfeb2336aed7,40631,1731504792390/bfeb2336aed7%2C40631%2C1731504792390.1731504804910 to hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/oldWALs/bfeb2336aed7%2C40631%2C1731504792390.1731504804910 2024-11-13T13:33:56,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741839_1015 (size=7739) 2024-11-13T13:33:56,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741839_1015 (size=7739) 2024-11-13T13:33:58,676 INFO [FSHLog-0-hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba-prefix:bfeb2336aed7,40631,1731504792390 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35441,DS-1bae7c9b-f15f-412d-a5a8-9fed873b45ef,DISK], DatanodeInfoWithStorage[127.0.0.1:35783,DS-b4a134dd-46ba-4d6f-89a3-9669c7252571,DISK]] 2024-11-13T13:34:00,201 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region eb5ebf9e791c82d5ca29a7ecad3b9cb5, had cached 0 bytes from a total of 25018 2024-11-13T13:34:00,882 INFO [FSHLog-0-hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba-prefix:bfeb2336aed7,40631,1731504792390 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35441,DS-1bae7c9b-f15f-412d-a5a8-9fed873b45ef,DISK], DatanodeInfoWithStorage[127.0.0.1:35783,DS-b4a134dd-46ba-4d6f-89a3-9669c7252571,DISK]] 2024-11-13T13:34:03,087 INFO [FSHLog-0-hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba-prefix:bfeb2336aed7,40631,1731504792390 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35441,DS-1bae7c9b-f15f-412d-a5a8-9fed873b45ef,DISK], DatanodeInfoWithStorage[127.0.0.1:35783,DS-b4a134dd-46ba-4d6f-89a3-9669c7252571,DISK]] 2024-11-13T13:34:05,293 INFO [FSHLog-0-hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba-prefix:bfeb2336aed7,40631,1731504792390 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35441,DS-1bae7c9b-f15f-412d-a5a8-9fed873b45ef,DISK], 
DatanodeInfoWithStorage[127.0.0.1:35783,DS-b4a134dd-46ba-4d6f-89a3-9669c7252571,DISK]] 2024-11-13T13:34:07,296 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T13:34:07,296 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C40631%2C1731504792390.1731504847296 2024-11-13T13:34:10,699 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T13:34:12,312 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5013 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35441,DS-1bae7c9b-f15f-412d-a5a8-9fed873b45ef,DISK], DatanodeInfoWithStorage[127.0.0.1:35783,DS-b4a134dd-46ba-4d6f-89a3-9669c7252571,DISK]] 2024-11-13T13:34:12,314 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5013 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35441,DS-1bae7c9b-f15f-412d-a5a8-9fed873b45ef,DISK], DatanodeInfoWithStorage[127.0.0.1:35783,DS-b4a134dd-46ba-4d6f-89a3-9669c7252571,DISK]] 2024-11-13T13:34:12,315 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog bfeb2336aed7%2C40631%2C1731504792390:(num 1731504847296) roll requested 2024-11-13T13:34:12,315 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:12,315 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:12,315 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:12,315 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:12,315 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:12,316 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/WALs/bfeb2336aed7,40631,1731504792390/bfeb2336aed7%2C40631%2C1731504792390.1731504836476 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/WALs/bfeb2336aed7,40631,1731504792390/bfeb2336aed7%2C40631%2C1731504792390.1731504847296 2024-11-13T13:34:12,317 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44455:44455),(127.0.0.1/127.0.0.1:45729:45729)] 2024-11-13T13:34:12,317 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/WALs/bfeb2336aed7,40631,1731504792390/bfeb2336aed7%2C40631%2C1731504792390.1731504836476 is not closed yet, will try archiving it next time 2024-11-13T13:34:12,317 INFO [regionserver/bfeb2336aed7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C40631%2C1731504792390.1731504852317 2024-11-13T13:34:12,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741841_1017 (size=4753) 2024-11-13T13:34:12,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741841_1017 (size=4753) 2024-11-13T13:34:17,321 INFO [FSHLog-0-hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba-prefix:bfeb2336aed7,40631,1731504792390 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:35783,DS-b4a134dd-46ba-4d6f-89a3-9669c7252571,DISK], DatanodeInfoWithStorage[127.0.0.1:35441,DS-1bae7c9b-f15f-412d-a5a8-9fed873b45ef,DISK]] 2024-11-13T13:34:17,321 WARN [FSHLog-0-hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba-prefix:bfeb2336aed7,40631,1731504792390 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35783,DS-b4a134dd-46ba-4d6f-89a3-9669c7252571,DISK], DatanodeInfoWithStorage[127.0.0.1:35441,DS-1bae7c9b-f15f-412d-a5a8-9fed873b45ef,DISK]] 2024-11-13T13:34:17,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40631 {}] regionserver.HRegion(8855): Flush requested on eb5ebf9e791c82d5ca29a7ecad3b9cb5 2024-11-13T13:34:17,322 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eb5ebf9e791c82d5ca29a7ecad3b9cb5 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T13:34:17,331 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35783,DS-b4a134dd-46ba-4d6f-89a3-9669c7252571,DISK], DatanodeInfoWithStorage[127.0.0.1:35441,DS-1bae7c9b-f15f-412d-a5a8-9fed873b45ef,DISK]] 2024-11-13T13:34:17,331 WARN [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35783,DS-b4a134dd-46ba-4d6f-89a3-9669c7252571,DISK], DatanodeInfoWithStorage[127.0.0.1:35441,DS-1bae7c9b-f15f-412d-a5a8-9fed873b45ef,DISK]] 2024-11-13T13:34:19,322 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T13:34:22,324 INFO [FSHLog-0-hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba-prefix:bfeb2336aed7,40631,1731504792390 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35783,DS-b4a134dd-46ba-4d6f-89a3-9669c7252571,DISK], DatanodeInfoWithStorage[127.0.0.1:35441,DS-1bae7c9b-f15f-412d-a5a8-9fed873b45ef,DISK]] 2024-11-13T13:34:22,324 WARN [FSHLog-0-hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba-prefix:bfeb2336aed7,40631,1731504792390 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35783,DS-b4a134dd-46ba-4d6f-89a3-9669c7252571,DISK], DatanodeInfoWithStorage[127.0.0.1:35441,DS-1bae7c9b-f15f-412d-a5a8-9fed873b45ef,DISK]] 2024-11-13T13:34:22,324 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:22,324 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:22,324 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:22,324 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:22,324 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:22,325 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/WALs/bfeb2336aed7,40631,1731504792390/bfeb2336aed7%2C40631%2C1731504792390.1731504847296 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/WALs/bfeb2336aed7,40631,1731504792390/bfeb2336aed7%2C40631%2C1731504792390.1731504852317 2024-11-13T13:34:22,325 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45729:45729),(127.0.0.1/127.0.0.1:44455:44455)] 2024-11-13T13:34:22,326 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/WALs/bfeb2336aed7,40631,1731504792390/bfeb2336aed7%2C40631%2C1731504792390.1731504847296 is not closed yet, will try archiving it next time 2024-11-13T13:34:22,326 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog bfeb2336aed7%2C40631%2C1731504792390:(num 1731504862326) roll requested 2024-11-13T13:34:22,326 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C40631%2C1731504792390.1731504862326 2024-11-13T13:34:22,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741842_1018 (size=1569) 2024-11-13T13:34:22,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741842_1018 (size=1569) 2024-11-13T13:34:22,329 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/.tmp/info/b772d80379b24728a56bad5965cf2970 is 1080, key is row0015/info:/1731504834058/Put/seqid=0 2024-11-13T13:34:22,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741845_1021 (size=12509) 2024-11-13T13:34:22,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741845_1021 (size=12509) 2024-11-13T13:34:22,340 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:22,340 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/.tmp/info/b772d80379b24728a56bad5965cf2970 2024-11-13T13:34:22,341 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:22,341 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:22,341 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:22,341 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:22,341 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/WALs/bfeb2336aed7,40631,1731504792390/bfeb2336aed7%2C40631%2C1731504792390.1731504852317 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/WALs/bfeb2336aed7,40631,1731504792390/bfeb2336aed7%2C40631%2C1731504792390.1731504862326 2024-11-13T13:34:22,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741843_1019 (size=93) 2024-11-13T13:34:22,344 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741843_1019 (size=93) 2024-11-13T13:34:22,344 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/WALs/bfeb2336aed7,40631,1731504792390/bfeb2336aed7%2C40631%2C1731504792390.1731504852317 to hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/oldWALs/bfeb2336aed7%2C40631%2C1731504792390.1731504852317 2024-11-13T13:34:22,345 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45729:45729),(127.0.0.1/127.0.0.1:44455:44455)] 2024-11-13T13:34:22,346 INFO [regionserver/bfeb2336aed7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C40631%2C1731504792390.1731504862346 2024-11-13T13:34:22,363 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:22,363 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:22,363 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:22,363 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:22,363 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/.tmp/info/b772d80379b24728a56bad5965cf2970 as hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/info/b772d80379b24728a56bad5965cf2970 2024-11-13T13:34:22,363 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:22,364 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/WALs/bfeb2336aed7,40631,1731504792390/bfeb2336aed7%2C40631%2C1731504792390.1731504862326 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/WALs/bfeb2336aed7,40631,1731504792390/bfeb2336aed7%2C40631%2C1731504792390.1731504862346 2024-11-13T13:34:22,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741844_1020 (size=1258) 2024-11-13T13:34:22,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741844_1020 (size=1258) 2024-11-13T13:34:22,369 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44455:44455),(127.0.0.1/127.0.0.1:45729:45729)] 2024-11-13T13:34:22,374 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/info/b772d80379b24728a56bad5965cf2970, entries=7, sequenceid=31, filesize=12.2 K 2024-11-13T13:34:22,375 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=1.05 KB/1076 for eb5ebf9e791c82d5ca29a7ecad3b9cb5 in 5054ms, sequenceid=31, compaction requested=true 2024-11-13T13:34:22,376 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
eb5ebf9e791c82d5ca29a7ecad3b9cb5: 2024-11-13T13:34:22,376 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-13T13:34:22,376 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T13:34:22,376 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/info/a23c44369bff47c78cf152a91e195029 because midkey is the same as first or last row 2024-11-13T13:34:22,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eb5ebf9e791c82d5ca29a7ecad3b9cb5:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T13:34:22,379 DEBUG [RS:0;bfeb2336aed7:40631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T13:34:22,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T13:34:22,382 DEBUG [RS:0;bfeb2336aed7:40631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T13:34:22,383 DEBUG [RS:0;bfeb2336aed7:40631-shortCompactions-0 {}] regionserver.HStore(1541): eb5ebf9e791c82d5ca29a7ecad3b9cb5/info is initiating minor compaction (all files) 2024-11-13T13:34:22,384 INFO [RS:0;bfeb2336aed7:40631-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of eb5ebf9e791c82d5ca29a7ecad3b9cb5/info in TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5. 
2024-11-13T13:34:22,384 INFO [RS:0;bfeb2336aed7:40631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/info/a23c44369bff47c78cf152a91e195029, hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/info/b676dd39633d4ec6a2ed5f2963d599b2, hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/info/b772d80379b24728a56bad5965cf2970] into tmpdir=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/.tmp, totalSize=36.6 K 2024-11-13T13:34:22,386 DEBUG [RS:0;bfeb2336aed7:40631-shortCompactions-0 {}] compactions.Compactor(225): Compacting a23c44369bff47c78cf152a91e195029, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731504804933 2024-11-13T13:34:22,387 DEBUG [RS:0;bfeb2336aed7:40631-shortCompactions-0 {}] compactions.Compactor(225): Compacting b676dd39633d4ec6a2ed5f2963d599b2, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731504818994 2024-11-13T13:34:22,387 DEBUG [RS:0;bfeb2336aed7:40631-shortCompactions-0 {}] compactions.Compactor(225): Compacting b772d80379b24728a56bad5965cf2970, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731504834058 2024-11-13T13:34:22,424 INFO [RS:0;bfeb2336aed7:40631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eb5ebf9e791c82d5ca29a7ecad3b9cb5#info#compaction#3 average throughput is 7.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T13:34:22,426 DEBUG [RS:0;bfeb2336aed7:40631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/.tmp/info/d600b149222d4f8d845ac31acba86bab is 1080, key is row0001/info:/1731504804933/Put/seqid=0 2024-11-13T13:34:22,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741847_1023 (size=27710) 2024-11-13T13:34:22,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741847_1023 (size=27710) 2024-11-13T13:34:22,452 DEBUG [RS:0;bfeb2336aed7:40631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/.tmp/info/d600b149222d4f8d845ac31acba86bab as hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/info/d600b149222d4f8d845ac31acba86bab 2024-11-13T13:34:22,474 INFO [RS:0;bfeb2336aed7:40631-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in eb5ebf9e791c82d5ca29a7ecad3b9cb5/info of eb5ebf9e791c82d5ca29a7ecad3b9cb5 into d600b149222d4f8d845ac31acba86bab(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T13:34:22,474 DEBUG [RS:0;bfeb2336aed7:40631-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for eb5ebf9e791c82d5ca29a7ecad3b9cb5: 2024-11-13T13:34:22,477 INFO [RS:0;bfeb2336aed7:40631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5., storeName=eb5ebf9e791c82d5ca29a7ecad3b9cb5/info, priority=13, startTime=1731504862377; duration=0sec 2024-11-13T13:34:22,477 DEBUG [RS:0;bfeb2336aed7:40631-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-13T13:34:22,477 DEBUG [RS:0;bfeb2336aed7:40631-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T13:34:22,477 DEBUG [RS:0;bfeb2336aed7:40631-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/info/d600b149222d4f8d845ac31acba86bab because midkey is the same as first or last row 2024-11-13T13:34:22,477 DEBUG [RS:0;bfeb2336aed7:40631-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-13T13:34:22,477 DEBUG [RS:0;bfeb2336aed7:40631-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T13:34:22,478 DEBUG [RS:0;bfeb2336aed7:40631-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/info/d600b149222d4f8d845ac31acba86bab because midkey is the same as first or last row 2024-11-13T13:34:22,478 DEBUG [RS:0;bfeb2336aed7:40631-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-13T13:34:22,478 DEBUG [RS:0;bfeb2336aed7:40631-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T13:34:22,478 DEBUG [RS:0;bfeb2336aed7:40631-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/info/d600b149222d4f8d845ac31acba86bab because midkey is the same as first or last row 2024-11-13T13:34:22,478 DEBUG [RS:0;bfeb2336aed7:40631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T13:34:22,478 DEBUG [RS:0;bfeb2336aed7:40631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eb5ebf9e791c82d5ca29a7ecad3b9cb5:info 2024-11-13T13:34:34,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40631 {}] regionserver.HRegion(8855): Flush requested on eb5ebf9e791c82d5ca29a7ecad3b9cb5 2024-11-13T13:34:34,370 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing eb5ebf9e791c82d5ca29a7ecad3b9cb5 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T13:34:34,377 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/.tmp/info/4117c2d1ca364d5aafaaf05c5a838971 is 1080, key is row0022/info:/1731504862347/Put/seqid=0 2024-11-13T13:34:34,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741848_1024 (size=12509) 2024-11-13T13:34:34,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741848_1024 (size=12509) 2024-11-13T13:34:34,790 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/.tmp/info/4117c2d1ca364d5aafaaf05c5a838971 2024-11-13T13:34:34,802 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/.tmp/info/4117c2d1ca364d5aafaaf05c5a838971 as hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/info/4117c2d1ca364d5aafaaf05c5a838971 2024-11-13T13:34:34,811 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/info/4117c2d1ca364d5aafaaf05c5a838971, entries=7, sequenceid=42, filesize=12.2 K 2024-11-13T13:34:34,813 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for eb5ebf9e791c82d5ca29a7ecad3b9cb5 in 443ms, sequenceid=42, compaction requested=false 2024-11-13T13:34:34,813 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for eb5ebf9e791c82d5ca29a7ecad3b9cb5: 2024-11-13T13:34:34,813 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-13T13:34:34,814 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T13:34:34,814 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/info/d600b149222d4f8d845ac31acba86bab because midkey is the same as first or last row 2024-11-13T13:34:40,699 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T13:34:42,381 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T13:34:42,382 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-13T13:34:42,382 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T13:34:42,387 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:34:42,388 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:34:42,388 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-13T13:34:42,388 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-13T13:34:42,388 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2033038724, stopped=false 2024-11-13T13:34:42,389 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=bfeb2336aed7,37965,1731504791597 2024-11-13T13:34:42,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40631-0x1013466588e0001, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T13:34:42,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T13:34:42,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40631-0x1013466588e0001, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:34:42,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:34:42,444 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T13:34:42,444 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-13T13:34:42,445 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T13:34:42,445 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:34:42,445 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T13:34:42,445 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40631-0x1013466588e0001, quorum=127.0.0.1:49285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T13:34:42,445 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'bfeb2336aed7,40631,1731504792390' ***** 2024-11-13T13:34:42,445 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T13:34:42,446 INFO [RS:0;bfeb2336aed7:40631 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T13:34:42,446 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T13:34:42,446 INFO [RS:0;bfeb2336aed7:40631 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T13:34:42,446 INFO [RS:0;bfeb2336aed7:40631 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T13:34:42,446 INFO [RS:0;bfeb2336aed7:40631 {}] regionserver.HRegionServer(3091): Received CLOSE for eb5ebf9e791c82d5ca29a7ecad3b9cb5 2024-11-13T13:34:42,447 INFO [RS:0;bfeb2336aed7:40631 {}] regionserver.HRegionServer(959): stopping server bfeb2336aed7,40631,1731504792390 2024-11-13T13:34:42,447 INFO [RS:0;bfeb2336aed7:40631 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T13:34:42,447 INFO [RS:0;bfeb2336aed7:40631 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;bfeb2336aed7:40631. 
2024-11-13T13:34:42,447 DEBUG [RS:0;bfeb2336aed7:40631 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T13:34:42,447 DEBUG [RS:0;bfeb2336aed7:40631 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:34:42,447 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing eb5ebf9e791c82d5ca29a7ecad3b9cb5, disabling compactions & flushes 2024-11-13T13:34:42,447 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5. 2024-11-13T13:34:42,447 INFO [RS:0;bfeb2336aed7:40631 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T13:34:42,447 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5. 2024-11-13T13:34:42,447 INFO [RS:0;bfeb2336aed7:40631 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T13:34:42,447 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5. after waiting 0 ms 2024-11-13T13:34:42,447 INFO [RS:0;bfeb2336aed7:40631 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-13T13:34:42,447 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5. 
2024-11-13T13:34:42,448 INFO [RS:0;bfeb2336aed7:40631 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T13:34:42,448 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing eb5ebf9e791c82d5ca29a7ecad3b9cb5 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-13T13:34:42,448 INFO [RS:0;bfeb2336aed7:40631 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-13T13:34:42,448 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T13:34:42,448 DEBUG [RS:0;bfeb2336aed7:40631 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, eb5ebf9e791c82d5ca29a7ecad3b9cb5=TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5.} 2024-11-13T13:34:42,448 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T13:34:42,448 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T13:34:42,448 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T13:34:42,448 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T13:34:42,448 DEBUG [RS:0;bfeb2336aed7:40631 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, eb5ebf9e791c82d5ca29a7ecad3b9cb5 2024-11-13T13:34:42,449 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-13T13:34:42,462 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/.tmp/info/0696ba3bb8a64abcbe4fc090a3eb17d0 is 1080, key is row0029/info:/1731504876372/Put/seqid=0 2024-11-13T13:34:42,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741849_1025 (size=8193) 2024-11-13T13:34:42,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741849_1025 (size=8193) 2024-11-13T13:34:42,472 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/.tmp/info/0696ba3bb8a64abcbe4fc090a3eb17d0 2024-11-13T13:34:42,475 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/hbase/meta/1588230740/.tmp/info/5a7a6d9bcbbd4a5aa4929132e7f4d6e8 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5./info:regioninfo/1731504795231/Put/seqid=0 2024-11-13T13:34:42,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741850_1026 (size=7016) 2024-11-13T13:34:42,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741850_1026 (size=7016) 2024-11-13T13:34:42,484 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/.tmp/info/0696ba3bb8a64abcbe4fc090a3eb17d0 as hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/info/0696ba3bb8a64abcbe4fc090a3eb17d0 2024-11-13T13:34:42,485 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/hbase/meta/1588230740/.tmp/info/5a7a6d9bcbbd4a5aa4929132e7f4d6e8 2024-11-13T13:34:42,493 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/info/0696ba3bb8a64abcbe4fc090a3eb17d0, entries=3, sequenceid=48, filesize=8.0 K 2024-11-13T13:34:42,494 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for eb5ebf9e791c82d5ca29a7ecad3b9cb5 in 46ms, sequenceid=48, compaction requested=true 2024-11-13T13:34:42,495 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/info/a23c44369bff47c78cf152a91e195029, hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/info/b676dd39633d4ec6a2ed5f2963d599b2, hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/info/b772d80379b24728a56bad5965cf2970] to archive 2024-11-13T13:34:42,499 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-13T13:34:42,504 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/info/a23c44369bff47c78cf152a91e195029 to hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/archive/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/info/a23c44369bff47c78cf152a91e195029 2024-11-13T13:34:42,506 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/info/b676dd39633d4ec6a2ed5f2963d599b2 to hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/archive/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/info/b676dd39633d4ec6a2ed5f2963d599b2 2024-11-13T13:34:42,510 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/info/b772d80379b24728a56bad5965cf2970 to hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/archive/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/info/b772d80379b24728a56bad5965cf2970 2024-11-13T13:34:42,512 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/hbase/meta/1588230740/.tmp/ns/a9f6bea52fe4499ea84c22c41db526dc is 43, key is default/ns:d/1731504794594/Put/seqid=0 2024-11-13T13:34:42,522 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=bfeb2336aed7:37965 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-13T13:34:42,528 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [a23c44369bff47c78cf152a91e195029=12509, b676dd39633d4ec6a2ed5f2963d599b2=12509, b772d80379b24728a56bad5965cf2970=12509] 2024-11-13T13:34:42,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741851_1027 (size=5153) 2024-11-13T13:34:42,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741851_1027 (size=5153) 2024-11-13T13:34:42,546 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/default/TestLogRolling-testSlowSyncLogRolling/eb5ebf9e791c82d5ca29a7ecad3b9cb5/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-13T13:34:42,548 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5. 2024-11-13T13:34:42,548 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for eb5ebf9e791c82d5ca29a7ecad3b9cb5: Waiting for close lock at 1731504882447Running coprocessor pre-close hooks at 1731504882447Disabling compacts and flushes for region at 1731504882447Disabling writes for close at 1731504882447Obtaining lock to block concurrent updates at 1731504882448 (+1 ms)Preparing flush snapshotting stores in eb5ebf9e791c82d5ca29a7ecad3b9cb5 at 1731504882448Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731504882448Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5. at 1731504882449 (+1 ms)Flushing eb5ebf9e791c82d5ca29a7ecad3b9cb5/info: creating writer at 1731504882449Flushing eb5ebf9e791c82d5ca29a7ecad3b9cb5/info: appending metadata at 1731504882461 (+12 ms)Flushing eb5ebf9e791c82d5ca29a7ecad3b9cb5/info: closing flushed file at 1731504882461Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@660a2fc3: reopening flushed file at 1731504882483 (+22 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for eb5ebf9e791c82d5ca29a7ecad3b9cb5 in 46ms, sequenceid=48, compaction requested=true at 1731504882494 (+11 ms)Writing region close event to WAL at 1731504882540 (+46 ms)Running coprocessor post-close hooks at 1731504882546 (+6 ms)Closed at 1731504882548 (+2 ms) 2024-11-13T13:34:42,548 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731504794773.eb5ebf9e791c82d5ca29a7ecad3b9cb5. 
2024-11-13T13:34:42,647 INFO [regionserver/bfeb2336aed7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-13T13:34:42,647 INFO [regionserver/bfeb2336aed7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-13T13:34:42,649 DEBUG [RS:0;bfeb2336aed7:40631 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-13T13:34:42,849 DEBUG [RS:0;bfeb2336aed7:40631 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-13T13:34:42,930 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/hbase/meta/1588230740/.tmp/ns/a9f6bea52fe4499ea84c22c41db526dc 2024-11-13T13:34:42,954 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/hbase/meta/1588230740/.tmp/table/a899a15fffde4bd0b86efbffe355c0a0 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731504795248/Put/seqid=0 2024-11-13T13:34:42,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741852_1028 (size=5396) 2024-11-13T13:34:42,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741852_1028 (size=5396) 2024-11-13T13:34:42,961 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/hbase/meta/1588230740/.tmp/table/a899a15fffde4bd0b86efbffe355c0a0 2024-11-13T13:34:42,970 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/hbase/meta/1588230740/.tmp/info/5a7a6d9bcbbd4a5aa4929132e7f4d6e8 as hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/hbase/meta/1588230740/info/5a7a6d9bcbbd4a5aa4929132e7f4d6e8 2024-11-13T13:34:42,979 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/hbase/meta/1588230740/info/5a7a6d9bcbbd4a5aa4929132e7f4d6e8, entries=10, sequenceid=11, filesize=6.9 K 2024-11-13T13:34:42,981 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/hbase/meta/1588230740/.tmp/ns/a9f6bea52fe4499ea84c22c41db526dc as hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/hbase/meta/1588230740/ns/a9f6bea52fe4499ea84c22c41db526dc 2024-11-13T13:34:42,991 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/hbase/meta/1588230740/ns/a9f6bea52fe4499ea84c22c41db526dc, 
entries=2, sequenceid=11, filesize=5.0 K 2024-11-13T13:34:42,992 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/hbase/meta/1588230740/.tmp/table/a899a15fffde4bd0b86efbffe355c0a0 as hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/hbase/meta/1588230740/table/a899a15fffde4bd0b86efbffe355c0a0 2024-11-13T13:34:43,000 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/hbase/meta/1588230740/table/a899a15fffde4bd0b86efbffe355c0a0, entries=2, sequenceid=11, filesize=5.3 K 2024-11-13T13:34:43,002 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 553ms, sequenceid=11, compaction requested=false 2024-11-13T13:34:43,007 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-13T13:34:43,008 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T13:34:43,008 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T13:34:43,008 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731504882448Running coprocessor pre-close hooks at 1731504882448Disabling compacts and flushes for region at 1731504882448Disabling writes for close at 1731504882448Obtaining lock to block concurrent updates at 1731504882449 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731504882449Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731504882449Flushing stores of hbase:meta,,1.1588230740 at 1731504882450 (+1 ms)Flushing 1588230740/info: creating writer at 1731504882450Flushing 1588230740/info: appending metadata at 1731504882475 (+25 ms)Flushing 1588230740/info: closing flushed file at 1731504882475Flushing 1588230740/ns: creating writer at 1731504882493 (+18 ms)Flushing 1588230740/ns: appending metadata at 1731504882511 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1731504882512 (+1 ms)Flushing 1588230740/table: creating writer at 1731504882938 (+426 ms)Flushing 1588230740/table: appending metadata at 1731504882954 (+16 ms)Flushing 1588230740/table: closing flushed file at 1731504882954Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3e0eb3c0: reopening flushed file at 1731504882969 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@779d1d20: reopening flushed file at 1731504882979 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f36e05a: reopening flushed file at 1731504882991 (+12 ms)Finished flush 
of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 553ms, sequenceid=11, compaction requested=false at 1731504883002 (+11 ms)Writing region close event to WAL at 1731504883003 (+1 ms)Running coprocessor post-close hooks at 1731504883008 (+5 ms)Closed at 1731504883008 2024-11-13T13:34:43,009 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T13:34:43,050 INFO [RS:0;bfeb2336aed7:40631 {}] regionserver.HRegionServer(976): stopping server bfeb2336aed7,40631,1731504792390; all regions closed. 2024-11-13T13:34:43,051 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:43,051 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:43,051 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:43,051 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:43,052 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:43,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741834_1010 (size=3066) 2024-11-13T13:34:43,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741834_1010 (size=3066) 2024-11-13T13:34:43,057 DEBUG [RS:0;bfeb2336aed7:40631 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/oldWALs 2024-11-13T13:34:43,058 INFO [RS:0;bfeb2336aed7:40631 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bfeb2336aed7%2C40631%2C1731504792390.meta:.meta(num 1731504794387) 2024-11-13T13:34:43,058 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:43,058 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:43,058 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:43,058 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:43,059 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:43,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741846_1022 (size=13040) 2024-11-13T13:34:43,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741846_1022 (size=13040) 2024-11-13T13:34:43,068 DEBUG [RS:0;bfeb2336aed7:40631 {}] wal.AbstractFSWAL(1256): Moved 5 WAL file(s) to /user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/oldWALs 2024-11-13T13:34:43,068 INFO [RS:0;bfeb2336aed7:40631 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bfeb2336aed7%2C40631%2C1731504792390:(num 1731504862346) 2024-11-13T13:34:43,068 DEBUG [RS:0;bfeb2336aed7:40631 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:34:43,068 INFO [RS:0;bfeb2336aed7:40631 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T13:34:43,068 INFO [RS:0;bfeb2336aed7:40631 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T13:34:43,068 INFO [RS:0;bfeb2336aed7:40631 {}] hbase.ChoreService(370): Chore service for: regionserver/bfeb2336aed7:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 
2024-11-13T13:34:43,069 INFO [RS:0;bfeb2336aed7:40631 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T13:34:43,069 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T13:34:43,069 INFO [RS:0;bfeb2336aed7:40631 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40631 2024-11-13T13:34:43,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T13:34:43,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40631-0x1013466588e0001, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bfeb2336aed7,40631,1731504792390 2024-11-13T13:34:43,118 INFO [RS:0;bfeb2336aed7:40631 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T13:34:43,119 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bfeb2336aed7,40631,1731504792390] 2024-11-13T13:34:43,138 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/bfeb2336aed7,40631,1731504792390 already deleted, retry=false 2024-11-13T13:34:43,139 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; bfeb2336aed7,40631,1731504792390 expired; onlineServers=0 2024-11-13T13:34:43,139 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'bfeb2336aed7,37965,1731504791597' ***** 2024-11-13T13:34:43,139 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-13T13:34:43,139 INFO [M:0;bfeb2336aed7:37965 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T13:34:43,139 INFO [M:0;bfeb2336aed7:37965 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T13:34:43,139 DEBUG [M:0;bfeb2336aed7:37965 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-13T13:34:43,139 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-13T13:34:43,139 DEBUG [M:0;bfeb2336aed7:37965 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T13:34:43,139 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.large.0-1731504793474 {}] cleaner.HFileCleaner(306): Exit Thread[master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.large.0-1731504793474,5,FailOnTimeoutGroup] 2024-11-13T13:34:43,139 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.small.0-1731504793475 {}] cleaner.HFileCleaner(306): Exit Thread[master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.small.0-1731504793475,5,FailOnTimeoutGroup] 2024-11-13T13:34:43,140 INFO [M:0;bfeb2336aed7:37965 {}] hbase.ChoreService(370): Chore service for: master/bfeb2336aed7:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T13:34:43,140 INFO [M:0;bfeb2336aed7:37965 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T13:34:43,140 DEBUG [M:0;bfeb2336aed7:37965 {}] master.HMaster(1795): Stopping service threads 2024-11-13T13:34:43,140 INFO [M:0;bfeb2336aed7:37965 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T13:34:43,140 INFO [M:0;bfeb2336aed7:37965 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T13:34:43,141 INFO [M:0;bfeb2336aed7:37965 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T13:34:43,141 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-13T13:34:43,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T13:34:43,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:34:43,149 DEBUG [M:0;bfeb2336aed7:37965 {}] zookeeper.ZKUtil(347): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-13T13:34:43,149 WARN [M:0;bfeb2336aed7:37965 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-13T13:34:43,151 INFO [M:0;bfeb2336aed7:37965 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/.lastflushedseqids 2024-11-13T13:34:43,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741853_1029 (size=130) 2024-11-13T13:34:43,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741853_1029 (size=130) 2024-11-13T13:34:43,163 INFO [M:0;bfeb2336aed7:37965 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T13:34:43,163 INFO [M:0;bfeb2336aed7:37965 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T13:34:43,163 DEBUG [M:0;bfeb2336aed7:37965 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T13:34:43,163 INFO [M:0;bfeb2336aed7:37965 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:34:43,163 DEBUG [M:0;bfeb2336aed7:37965 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:34:43,163 DEBUG [M:0;bfeb2336aed7:37965 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T13:34:43,163 DEBUG [M:0;bfeb2336aed7:37965 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:34:43,163 INFO [M:0;bfeb2336aed7:37965 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-13T13:34:43,182 DEBUG [M:0;bfeb2336aed7:37965 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/53ff1da960cd41398bb409d9aa72e32b is 82, key is hbase:meta,,1/info:regioninfo/1731504794452/Put/seqid=0 2024-11-13T13:34:43,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741854_1030 (size=5672) 2024-11-13T13:34:43,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741854_1030 (size=5672) 2024-11-13T13:34:43,191 INFO [M:0;bfeb2336aed7:37965 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/53ff1da960cd41398bb409d9aa72e32b 2024-11-13T13:34:43,215 DEBUG [M:0;bfeb2336aed7:37965 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cc37a2232f4047eeb669ec284ebf1030 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731504795256/Put/seqid=0 2024-11-13T13:34:43,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741855_1031 (size=6247) 2024-11-13T13:34:43,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741855_1031 (size=6247) 2024-11-13T13:34:43,223 INFO [M:0;bfeb2336aed7:37965 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cc37a2232f4047eeb669ec284ebf1030 2024-11-13T13:34:43,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40631-0x1013466588e0001, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T13:34:43,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40631-0x1013466588e0001, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 
2024-11-13T13:34:43,229 INFO [RS:0;bfeb2336aed7:40631 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T13:34:43,229 INFO [M:0;bfeb2336aed7:37965 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for cc37a2232f4047eeb669ec284ebf1030 2024-11-13T13:34:43,229 INFO [RS:0;bfeb2336aed7:40631 {}] regionserver.HRegionServer(1031): Exiting; stopping=bfeb2336aed7,40631,1731504792390; zookeeper connection closed. 2024-11-13T13:34:43,229 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5186ed0f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5186ed0f 2024-11-13T13:34:43,230 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-13T13:34:43,246 DEBUG [M:0;bfeb2336aed7:37965 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e1827c52ba514bd9956f4efd9b96096f is 69, key is bfeb2336aed7,40631,1731504792390/rs:state/1731504793508/Put/seqid=0 2024-11-13T13:34:43,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741856_1032 (size=5156) 2024-11-13T13:34:43,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741856_1032 (size=5156) 2024-11-13T13:34:43,252 INFO [M:0;bfeb2336aed7:37965 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e1827c52ba514bd9956f4efd9b96096f 2024-11-13T13:34:43,282 DEBUG [M:0;bfeb2336aed7:37965 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d0988297f97a43658c0e1f108de3e23d is 52, key is load_balancer_on/state:d/1731504794752/Put/seqid=0 2024-11-13T13:34:43,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741857_1033 (size=5056) 2024-11-13T13:34:43,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741857_1033 (size=5056) 2024-11-13T13:34:43,290 INFO [M:0;bfeb2336aed7:37965 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d0988297f97a43658c0e1f108de3e23d 2024-11-13T13:34:43,299 DEBUG [M:0;bfeb2336aed7:37965 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/53ff1da960cd41398bb409d9aa72e32b as hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/53ff1da960cd41398bb409d9aa72e32b 2024-11-13T13:34:43,308 INFO [M:0;bfeb2336aed7:37965 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/53ff1da960cd41398bb409d9aa72e32b, entries=8, sequenceid=59, filesize=5.5 K 2024-11-13T13:34:43,309 DEBUG [M:0;bfeb2336aed7:37965 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cc37a2232f4047eeb669ec284ebf1030 as hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cc37a2232f4047eeb669ec284ebf1030 2024-11-13T13:34:43,316 INFO [M:0;bfeb2336aed7:37965 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for cc37a2232f4047eeb669ec284ebf1030 2024-11-13T13:34:43,317 INFO [M:0;bfeb2336aed7:37965 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cc37a2232f4047eeb669ec284ebf1030, entries=6, sequenceid=59, filesize=6.1 K 2024-11-13T13:34:43,318 DEBUG [M:0;bfeb2336aed7:37965 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e1827c52ba514bd9956f4efd9b96096f as hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e1827c52ba514bd9956f4efd9b96096f 2024-11-13T13:34:43,326 INFO [M:0;bfeb2336aed7:37965 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e1827c52ba514bd9956f4efd9b96096f, entries=1, sequenceid=59, filesize=5.0 K 2024-11-13T13:34:43,328 DEBUG [M:0;bfeb2336aed7:37965 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d0988297f97a43658c0e1f108de3e23d as hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d0988297f97a43658c0e1f108de3e23d 2024-11-13T13:34:43,335 INFO [M:0;bfeb2336aed7:37965 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d0988297f97a43658c0e1f108de3e23d, entries=1, sequenceid=59, filesize=4.9 K 2024-11-13T13:34:43,337 INFO [M:0;bfeb2336aed7:37965 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 174ms, sequenceid=59, compaction requested=false 2024-11-13T13:34:43,339 INFO [M:0;bfeb2336aed7:37965 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T13:34:43,339 DEBUG [M:0;bfeb2336aed7:37965 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731504883163Disabling compacts and flushes for region at 1731504883163Disabling writes for close at 1731504883163Obtaining lock to block concurrent updates at 1731504883163Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731504883163Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1731504883164 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731504883165 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731504883165Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731504883181 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731504883182 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731504883198 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731504883214 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731504883214Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731504883229 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731504883245 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731504883245Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731504883260 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731504883281 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731504883281Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@51873eef: reopening flushed file at 1731504883297 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6ee7a708: reopening flushed file at 1731504883308 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2e702c7f: reopening flushed file at 1731504883317 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35bf6071: reopening flushed file at 1731504883327 (+10 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 174ms, sequenceid=59, compaction requested=false at 1731504883337 (+10 ms)Writing region close event to WAL at 1731504883339 (+2 ms)Closed at 1731504883339 2024-11-13T13:34:43,340 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:43,340 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:43,340 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:43,341 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:43,341 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:43,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35441 is added to blk_1073741830_1006 (size=27973) 2024-11-13T13:34:43,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741830_1006 (size=27973) 2024-11-13T13:34:43,346 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-13T13:34:43,347 INFO [M:0;bfeb2336aed7:37965 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-13T13:34:43,347 INFO [M:0;bfeb2336aed7:37965 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37965 2024-11-13T13:34:43,347 INFO [M:0;bfeb2336aed7:37965 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T13:34:43,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T13:34:43,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37965-0x1013466588e0000, quorum=127.0.0.1:49285, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T13:34:43,460 INFO [M:0;bfeb2336aed7:37965 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T13:34:43,497 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:34:43,500 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T13:34:43,500 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T13:34:43,500 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T13:34:43,500 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/hadoop.log.dir/,STOPPED} 2024-11-13T13:34:43,503 WARN [BP-164363160-172.17.0.2-1731504787152 heartbeating to localhost/127.0.0.1:38779 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T13:34:43,503 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T13:34:43,503 WARN [BP-164363160-172.17.0.2-1731504787152 heartbeating to localhost/127.0.0.1:38779 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-164363160-172.17.0.2-1731504787152 (Datanode Uuid a9c36546-5d6b-40f8-87d9-e6813ef6ccd0) service to localhost/127.0.0.1:38779 2024-11-13T13:34:43,503 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T13:34:43,505 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/cluster_bc4ed2ba-25d8-59f1-9b5b-16401e7ae59d/data/data3/current/BP-164363160-172.17.0.2-1731504787152 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:34:43,505 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/cluster_bc4ed2ba-25d8-59f1-9b5b-16401e7ae59d/data/data4/current/BP-164363160-172.17.0.2-1731504787152 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:34:43,506 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T13:34:43,509 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:34:43,509 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T13:34:43,509 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T13:34:43,509 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T13:34:43,510 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/hadoop.log.dir/,STOPPED} 2024-11-13T13:34:43,517 WARN [BP-164363160-172.17.0.2-1731504787152 heartbeating to localhost/127.0.0.1:38779 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T13:34:43,517 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T13:34:43,517 WARN [BP-164363160-172.17.0.2-1731504787152 heartbeating to localhost/127.0.0.1:38779 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-164363160-172.17.0.2-1731504787152 (Datanode Uuid f3c2b58d-5e24-44a9-a0dc-778fb35f8a5b) service to localhost/127.0.0.1:38779 2024-11-13T13:34:43,517 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T13:34:43,518 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/cluster_bc4ed2ba-25d8-59f1-9b5b-16401e7ae59d/data/data1/current/BP-164363160-172.17.0.2-1731504787152 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:34:43,518 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/cluster_bc4ed2ba-25d8-59f1-9b5b-16401e7ae59d/data/data2/current/BP-164363160-172.17.0.2-1731504787152 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:34:43,519 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T13:34:43,529 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T13:34:43,530 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T13:34:43,530 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T13:34:43,530 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T13:34:43,530 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/hadoop.log.dir/,STOPPED} 2024-11-13T13:34:43,539 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-13T13:34:43,576 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-13T13:34:43,587 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=82 (was 12) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:38779 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/bfeb2336aed7:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: regionserver/bfeb2336aed7:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38779 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:38779 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: master/bfeb2336aed7:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38779 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/bfeb2336aed7:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:38779 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@60041d6f java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38779 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:38779 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: 
ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38779 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=206 (was 220), ProcessCount=11 (was 11), AvailableMemoryMB=4202 (was 5043)
2024-11-13T13:34:43,596 INFO [regionserver/bfeb2336aed7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-11-13T13:34:43,597 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=83, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=206, ProcessCount=11, AvailableMemoryMB=4200
2024-11-13T13:34:43,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-13T13:34:43,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/hadoop.log.dir so I do NOT create it in target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a
2024-11-13T13:34:43,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e988ba8-14d4-a440-4ce7-dbb30dde5b42/hadoop.tmp.dir so I do NOT create it in target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a
2024-11-13T13:34:43,597 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/cluster_f03ad9ef-4785-3bcf-8cc3-d268983a1bf4, deleteOnExit=true
2024-11-13T13:34:43,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-13T13:34:43,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/test.cache.data in system properties and HBase conf
2024-11-13T13:34:43,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/hadoop.tmp.dir in system properties and HBase conf
2024-11-13T13:34:43,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/hadoop.log.dir in system properties and HBase conf
2024-11-13T13:34:43,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-13T13:34:43,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/mapreduce.cluster.temp.dir in system properties and HBase conf
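[Editor's note] The StartMiniClusterOption entry above records the options the test passes to HBaseTestingUtil when it brings up the single-master, single-regionserver, two-datanode mini cluster whose startup is logged below. The following is a minimal sketch of that startup call, not the actual test code; it assumes the builder methods mirror the field names printed in the log (numMasters, numRegionServers, numDataNodes, numZkServers) and that exact signatures may differ between HBase branches.

// Illustrative sketch only; class and option names are taken from the log output above.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Mirror the option object printed in the log: 1 master, 1 region server,
    // 2 HDFS data nodes, 1 ZooKeeper server (assumed builder method names).
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);    // starts DFS, ZooKeeper and HBase, as logged below
    try {
      // ... test logic would run against the mini cluster here ...
    } finally {
      util.shutdownMiniCluster();     // tears the cluster down; the ResourceChecker
                                      // before/after counts shown above bracket this lifecycle
    }
  }
}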
2024-11-13T13:34:43,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-13T13:34:43,598 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-13T13:34:43,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-13T13:34:43,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-13T13:34:43,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-13T13:34:43,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-13T13:34:43,599 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-13T13:34:43,599 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-13T13:34:43,599 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-13T13:34:43,599 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-13T13:34:43,599 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-13T13:34:43,599 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/nfs.dump.dir in system properties and HBase conf
2024-11-13T13:34:43,599 INFO [Time-limited test {}]
hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/java.io.tmpdir in system properties and HBase conf 2024-11-13T13:34:43,599 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T13:34:43,599 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T13:34:43,599 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T13:34:43,613 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T13:34:44,064 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:34:44,071 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T13:34:44,073 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T13:34:44,073 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T13:34:44,073 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T13:34:44,074 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:34:44,075 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2152d149{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/hadoop.log.dir/,AVAILABLE} 2024-11-13T13:34:44,075 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@542ee468{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T13:34:44,200 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@81550dd{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/java.io.tmpdir/jetty-localhost-33985-hadoop-hdfs-3_4_1-tests_jar-_-any-2294478152689253728/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T13:34:44,201 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2d71062d{HTTP/1.1, (http/1.1)}{localhost:33985} 2024-11-13T13:34:44,201 INFO [Time-limited test {}] server.Server(415): Started @98889ms 2024-11-13T13:34:44,219 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T13:34:44,474 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:34:44,477 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T13:34:44,478 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T13:34:44,478 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T13:34:44,478 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T13:34:44,479 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21e00560{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/hadoop.log.dir/,AVAILABLE} 2024-11-13T13:34:44,479 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3b01355c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T13:34:44,596 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@426dc4fa{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/java.io.tmpdir/jetty-localhost-38971-hadoop-hdfs-3_4_1-tests_jar-_-any-1293285899502330357/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:34:44,596 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@417f4bff{HTTP/1.1, (http/1.1)}{localhost:38971} 2024-11-13T13:34:44,597 INFO [Time-limited test {}] server.Server(415): Started @99284ms 2024-11-13T13:34:44,599 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T13:34:44,648 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:34:44,652 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T13:34:44,654 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T13:34:44,654 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T13:34:44,655 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T13:34:44,655 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@17d00685{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/hadoop.log.dir/,AVAILABLE} 2024-11-13T13:34:44,656 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2220be00{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T13:34:44,769 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@210f4ee9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/java.io.tmpdir/jetty-localhost-39663-hadoop-hdfs-3_4_1-tests_jar-_-any-11659564236119264593/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:34:44,770 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1dedf320{HTTP/1.1, (http/1.1)}{localhost:39663} 2024-11-13T13:34:44,770 INFO [Time-limited test {}] server.Server(415): Started @99457ms 2024-11-13T13:34:44,771 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T13:34:45,621 WARN [Thread-447 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/cluster_f03ad9ef-4785-3bcf-8cc3-d268983a1bf4/data/data2/current/BP-1458020420-172.17.0.2-1731504883629/current, will proceed with Du for space computation calculation, 2024-11-13T13:34:45,621 WARN [Thread-446 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/cluster_f03ad9ef-4785-3bcf-8cc3-d268983a1bf4/data/data1/current/BP-1458020420-172.17.0.2-1731504883629/current, will proceed with Du for space computation calculation, 2024-11-13T13:34:45,644 WARN [Thread-410 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T13:34:45,647 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe5312c39c3e6f0fc with lease ID 0x7a74b0c9c705f547: Processing first storage report for DS-13a8b67a-79c4-472b-adf3-4501375f0cea from datanode DatanodeRegistration(127.0.0.1:35491, datanodeUuid=88bcda19-8afb-4636-a539-065f94099559, infoPort=37211, infoSecurePort=0, ipcPort=33779, storageInfo=lv=-57;cid=testClusterID;nsid=879276898;c=1731504883629) 2024-11-13T13:34:45,647 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe5312c39c3e6f0fc with lease ID 0x7a74b0c9c705f547: from storage DS-13a8b67a-79c4-472b-adf3-4501375f0cea node DatanodeRegistration(127.0.0.1:35491, datanodeUuid=88bcda19-8afb-4636-a539-065f94099559, infoPort=37211, infoSecurePort=0, ipcPort=33779, storageInfo=lv=-57;cid=testClusterID;nsid=879276898;c=1731504883629), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:34:45,647 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe5312c39c3e6f0fc with lease ID 0x7a74b0c9c705f547: Processing first storage report for DS-1617a0b5-03e0-4ed6-8647-5cb25eec8c18 from datanode DatanodeRegistration(127.0.0.1:35491, datanodeUuid=88bcda19-8afb-4636-a539-065f94099559, infoPort=37211, infoSecurePort=0, ipcPort=33779, storageInfo=lv=-57;cid=testClusterID;nsid=879276898;c=1731504883629) 2024-11-13T13:34:45,647 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe5312c39c3e6f0fc with lease ID 0x7a74b0c9c705f547: from storage DS-1617a0b5-03e0-4ed6-8647-5cb25eec8c18 node DatanodeRegistration(127.0.0.1:35491, datanodeUuid=88bcda19-8afb-4636-a539-065f94099559, infoPort=37211, infoSecurePort=0, ipcPort=33779, storageInfo=lv=-57;cid=testClusterID;nsid=879276898;c=1731504883629), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:34:45,820 WARN [Thread-457 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/cluster_f03ad9ef-4785-3bcf-8cc3-d268983a1bf4/data/data3/current/BP-1458020420-172.17.0.2-1731504883629/current, will proceed with Du for space computation calculation, 2024-11-13T13:34:45,820 WARN [Thread-458 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/cluster_f03ad9ef-4785-3bcf-8cc3-d268983a1bf4/data/data4/current/BP-1458020420-172.17.0.2-1731504883629/current, will proceed with Du for space computation calculation, 2024-11-13T13:34:45,841 WARN [Thread-433 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T13:34:45,843 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x96f626871d98831f with lease ID 0x7a74b0c9c705f548: Processing first storage report for DS-d98de0ae-af73-4ddc-a1ff-f2f449cba749 from datanode DatanodeRegistration(127.0.0.1:45059, datanodeUuid=7c0d5e38-08a6-4860-be56-6593de5a6caf, infoPort=33439, infoSecurePort=0, ipcPort=43647, storageInfo=lv=-57;cid=testClusterID;nsid=879276898;c=1731504883629) 2024-11-13T13:34:45,843 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x96f626871d98831f with lease ID 0x7a74b0c9c705f548: from storage DS-d98de0ae-af73-4ddc-a1ff-f2f449cba749 node DatanodeRegistration(127.0.0.1:45059, datanodeUuid=7c0d5e38-08a6-4860-be56-6593de5a6caf, infoPort=33439, infoSecurePort=0, ipcPort=43647, storageInfo=lv=-57;cid=testClusterID;nsid=879276898;c=1731504883629), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:34:45,843 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x96f626871d98831f with lease ID 0x7a74b0c9c705f548: Processing first storage report for DS-55b02f61-2443-4003-a170-8d73133e1db7 from datanode DatanodeRegistration(127.0.0.1:45059, datanodeUuid=7c0d5e38-08a6-4860-be56-6593de5a6caf, infoPort=33439, infoSecurePort=0, ipcPort=43647, storageInfo=lv=-57;cid=testClusterID;nsid=879276898;c=1731504883629) 2024-11-13T13:34:45,843 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x96f626871d98831f with lease ID 0x7a74b0c9c705f548: from storage DS-55b02f61-2443-4003-a170-8d73133e1db7 node DatanodeRegistration(127.0.0.1:45059, datanodeUuid=7c0d5e38-08a6-4860-be56-6593de5a6caf, infoPort=33439, infoSecurePort=0, ipcPort=43647, storageInfo=lv=-57;cid=testClusterID;nsid=879276898;c=1731504883629), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:34:45,927 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a 2024-11-13T13:34:45,930 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/cluster_f03ad9ef-4785-3bcf-8cc3-d268983a1bf4/zookeeper_0, clientPort=51842, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/cluster_f03ad9ef-4785-3bcf-8cc3-d268983a1bf4/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/cluster_f03ad9ef-4785-3bcf-8cc3-d268983a1bf4/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T13:34:45,931 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51842 2024-11-13T13:34:45,931 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:34:45,933 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:34:45,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35491 is added to blk_1073741825_1001 (size=7) 2024-11-13T13:34:45,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741825_1001 (size=7) 2024-11-13T13:34:45,946 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe with version=8 2024-11-13T13:34:45,946 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/hbase-staging 2024-11-13T13:34:45,949 INFO [Time-limited test {}] client.ConnectionUtils(128): master/bfeb2336aed7:0 server-side Connection retries=45 2024-11-13T13:34:45,949 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T13:34:45,949 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T13:34:45,949 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T13:34:45,949 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T13:34:45,949 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T13:34:45,950 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T13:34:45,950 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T13:34:45,952 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46715 2024-11-13T13:34:45,954 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46715 connecting to ZooKeeper ensemble=127.0.0.1:51842 2024-11-13T13:34:46,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:467150x0, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T13:34:46,011 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46715-0x1013467cc540000 connected 2024-11-13T13:34:46,097 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:34:46,099 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:34:46,102 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T13:34:46,103 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe, hbase.cluster.distributed=false 2024-11-13T13:34:46,105 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T13:34:46,105 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46715 2024-11-13T13:34:46,105 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46715 2024-11-13T13:34:46,106 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46715 2024-11-13T13:34:46,106 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46715 2024-11-13T13:34:46,106 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46715 2024-11-13T13:34:46,123 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/bfeb2336aed7:0 server-side Connection retries=45 2024-11-13T13:34:46,123 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T13:34:46,123 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T13:34:46,123 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T13:34:46,123 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T13:34:46,123 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T13:34:46,123 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T13:34:46,123 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T13:34:46,124 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41113 2024-11-13T13:34:46,126 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41113 connecting to ZooKeeper ensemble=127.0.0.1:51842 2024-11-13T13:34:46,127 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:34:46,130 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:34:46,139 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:411130x0, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T13:34:46,139 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:411130x0, quorum=127.0.0.1:51842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T13:34:46,140 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T13:34:46,140 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41113-0x1013467cc540001 connected 2024-11-13T13:34:46,148 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T13:34:46,149 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41113-0x1013467cc540001, quorum=127.0.0.1:51842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T13:34:46,150 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41113-0x1013467cc540001, quorum=127.0.0.1:51842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T13:34:46,150 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41113 2024-11-13T13:34:46,152 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41113 2024-11-13T13:34:46,152 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41113 2024-11-13T13:34:46,156 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41113 2024-11-13T13:34:46,157 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41113 2024-11-13T13:34:46,172 DEBUG [M:0;bfeb2336aed7:46715 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;bfeb2336aed7:46715 2024-11-13T13:34:46,172 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/bfeb2336aed7,46715,1731504885949 2024-11-13T13:34:46,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T13:34:46,184 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/bfeb2336aed7,46715,1731504885949 2024-11-13T13:34:46,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41113-0x1013467cc540001, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper 
Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T13:34:46,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41113-0x1013467cc540001, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T13:34:46,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:34:46,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41113-0x1013467cc540001, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:34:46,192 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T13:34:46,193 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/bfeb2336aed7,46715,1731504885949 from backup master directory 2024-11-13T13:34:46,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41113-0x1013467cc540001, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T13:34:46,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/bfeb2336aed7,46715,1731504885949 2024-11-13T13:34:46,202 WARN [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-13T13:34:46,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T13:34:46,202 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=bfeb2336aed7,46715,1731504885949 2024-11-13T13:34:46,213 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/hbase.id] with ID: e0a63878-39b5-482f-ae13-e7095e981dc3 2024-11-13T13:34:46,213 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/.tmp/hbase.id 2024-11-13T13:34:46,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35491 is added to blk_1073741826_1002 (size=42) 2024-11-13T13:34:46,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741826_1002 (size=42) 2024-11-13T13:34:46,229 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/.tmp/hbase.id]:[hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/hbase.id] 2024-11-13T13:34:46,245 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:34:46,245 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-13T13:34:46,248 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 3ms. 
2024-11-13T13:34:46,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41113-0x1013467cc540001, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:34:46,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:34:46,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35491 is added to blk_1073741827_1003 (size=196) 2024-11-13T13:34:46,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741827_1003 (size=196) 2024-11-13T13:34:46,279 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T13:34:46,280 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T13:34:46,280 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T13:34:46,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741828_1004 (size=1189) 2024-11-13T13:34:46,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35491 is added to blk_1073741828_1004 (size=1189) 2024-11-13T13:34:46,691 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/MasterData/data/master/store 2024-11-13T13:34:46,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35491 is added to blk_1073741829_1005 (size=34) 2024-11-13T13:34:46,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741829_1005 (size=34) 2024-11-13T13:34:46,702 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:34:46,702 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T13:34:46,703 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:34:46,703 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:34:46,703 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T13:34:46,703 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:34:46,703 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T13:34:46,703 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731504886702Disabling compacts and flushes for region at 1731504886702Disabling writes for close at 1731504886703 (+1 ms)Writing region close event to WAL at 1731504886703Closed at 1731504886703 2024-11-13T13:34:46,704 WARN [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/MasterData/data/master/store/.initializing 2024-11-13T13:34:46,705 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/MasterData/WALs/bfeb2336aed7,46715,1731504885949 2024-11-13T13:34:46,708 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bfeb2336aed7%2C46715%2C1731504885949, suffix=, logDir=hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/MasterData/WALs/bfeb2336aed7,46715,1731504885949, archiveDir=hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/MasterData/oldWALs, maxLogs=10 2024-11-13T13:34:46,709 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C46715%2C1731504885949.1731504886709 2024-11-13T13:34:46,716 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/MasterData/WALs/bfeb2336aed7,46715,1731504885949/bfeb2336aed7%2C46715%2C1731504885949.1731504886709 2024-11-13T13:34:46,717 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33439:33439),(127.0.0.1/127.0.0.1:37211:37211)] 2024-11-13T13:34:46,717 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T13:34:46,718 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:34:46,718 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:34:46,718 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:34:46,720 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:34:46,722 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T13:34:46,722 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:34:46,723 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:34:46,723 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:34:46,725 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T13:34:46,725 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:34:46,726 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T13:34:46,726 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:34:46,728 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T13:34:46,729 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:34:46,729 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T13:34:46,729 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:34:46,731 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T13:34:46,731 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:34:46,732 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T13:34:46,732 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:34:46,733 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:34:46,734 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:34:46,736 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:34:46,736 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:34:46,737 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T13:34:46,739 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:34:46,743 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T13:34:46,744 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=824534, jitterRate=0.0484495609998703}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T13:34:46,745 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731504886718Initializing all the Stores at 1731504886719 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504886720 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731504886720Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731504886720Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731504886720Cleaning up temporary data from old regions at 1731504886736 (+16 ms)Region opened successfully at 1731504886745 (+9 ms) 2024-11-13T13:34:46,746 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T13:34:46,752 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2bc56628, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bfeb2336aed7/172.17.0.2:0 2024-11-13T13:34:46,754 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-13T13:34:46,754 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T13:34:46,754 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T13:34:46,754 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T13:34:46,756 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-13T13:34:46,758 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 1 msec 2024-11-13T13:34:46,758 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T13:34:46,763 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-13T13:34:46,764 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T13:34:46,970 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T13:34:46,970 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T13:34:46,971 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T13:34:46,980 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T13:34:46,981 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T13:34:46,982 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T13:34:46,991 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T13:34:46,992 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T13:34:47,001 DEBUG 
[master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T13:34:47,004 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T13:34:47,012 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T13:34:47,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41113-0x1013467cc540001, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T13:34:47,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T13:34:47,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41113-0x1013467cc540001, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:34:47,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:34:47,023 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=bfeb2336aed7,46715,1731504885949, sessionid=0x1013467cc540000, setting cluster-up flag (Was=false) 2024-11-13T13:34:47,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41113-0x1013467cc540001, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:34:47,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:34:47,075 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T13:34:47,078 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bfeb2336aed7,46715,1731504885949 2024-11-13T13:34:47,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:34:47,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41113-0x1013467cc540001, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:34:47,128 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T13:34:47,129 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bfeb2336aed7,46715,1731504885949 2024-11-13T13:34:47,131 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T13:34:47,133 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T13:34:47,134 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T13:34:47,134 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-13T13:34:47,134 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: bfeb2336aed7,46715,1731504885949 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T13:34:47,136 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/bfeb2336aed7:0, corePoolSize=5, maxPoolSize=5 2024-11-13T13:34:47,136 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/bfeb2336aed7:0, corePoolSize=5, maxPoolSize=5 2024-11-13T13:34:47,136 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=5, maxPoolSize=5 2024-11-13T13:34:47,136 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=5, maxPoolSize=5 2024-11-13T13:34:47,137 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/bfeb2336aed7:0, corePoolSize=10, maxPoolSize=10 2024-11-13T13:34:47,137 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:47,137 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=2, maxPoolSize=2 2024-11-13T13:34:47,137 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=1, 
maxPoolSize=1 2024-11-13T13:34:47,137 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731504917137 2024-11-13T13:34:47,138 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T13:34:47,138 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T13:34:47,138 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T13:34:47,138 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T13:34:47,138 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T13:34:47,138 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T13:34:47,139 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T13:34:47,139 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T13:34:47,140 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:34:47,140 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-13T13:34:47,140 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T13:34:47,144 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T13:34:47,144 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T13:34:47,145 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T13:34:47,145 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T13:34:47,145 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T13:34:47,145 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.large.0-1731504887145,5,FailOnTimeoutGroup] 2024-11-13T13:34:47,146 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.small.0-1731504887145,5,FailOnTimeoutGroup] 2024-11-13T13:34:47,146 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:47,147 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
2024-11-13T13:34:47,147 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:47,147 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:47,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35491 is added to blk_1073741831_1007 (size=1321) 2024-11-13T13:34:47,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741831_1007 (size=1321) 2024-11-13T13:34:47,160 INFO [RS:0;bfeb2336aed7:41113 {}] regionserver.HRegionServer(746): ClusterId : e0a63878-39b5-482f-ae13-e7095e981dc3 2024-11-13T13:34:47,160 DEBUG [RS:0;bfeb2336aed7:41113 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T13:34:47,171 DEBUG [RS:0;bfeb2336aed7:41113 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T13:34:47,171 DEBUG [RS:0;bfeb2336aed7:41113 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T13:34:47,182 DEBUG [RS:0;bfeb2336aed7:41113 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T13:34:47,182 DEBUG [RS:0;bfeb2336aed7:41113 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4eeaca7c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bfeb2336aed7/172.17.0.2:0 2024-11-13T13:34:47,200 DEBUG [RS:0;bfeb2336aed7:41113 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;bfeb2336aed7:41113 2024-11-13T13:34:47,200 INFO [RS:0;bfeb2336aed7:41113 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T13:34:47,200 INFO [RS:0;bfeb2336aed7:41113 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T13:34:47,200 DEBUG [RS:0;bfeb2336aed7:41113 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-13T13:34:47,201 INFO [RS:0;bfeb2336aed7:41113 {}] regionserver.HRegionServer(2659): reportForDuty to master=bfeb2336aed7,46715,1731504885949 with port=41113, startcode=1731504886122 2024-11-13T13:34:47,201 DEBUG [RS:0;bfeb2336aed7:41113 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T13:34:47,203 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54571, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T13:34:47,204 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46715 {}] master.ServerManager(363): Checking decommissioned status of RegionServer bfeb2336aed7,41113,1731504886122 2024-11-13T13:34:47,204 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46715 {}] master.ServerManager(517): Registering regionserver=bfeb2336aed7,41113,1731504886122 2024-11-13T13:34:47,206 DEBUG [RS:0;bfeb2336aed7:41113 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe 2024-11-13T13:34:47,206 DEBUG [RS:0;bfeb2336aed7:41113 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46753 2024-11-13T13:34:47,206 DEBUG [RS:0;bfeb2336aed7:41113 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T13:34:47,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T13:34:47,212 DEBUG [RS:0;bfeb2336aed7:41113 {}] zookeeper.ZKUtil(111): regionserver:41113-0x1013467cc540001, quorum=127.0.0.1:51842, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bfeb2336aed7,41113,1731504886122 2024-11-13T13:34:47,213 WARN [RS:0;bfeb2336aed7:41113 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T13:34:47,213 INFO [RS:0;bfeb2336aed7:41113 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T13:34:47,213 DEBUG [RS:0;bfeb2336aed7:41113 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/WALs/bfeb2336aed7,41113,1731504886122 2024-11-13T13:34:47,213 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bfeb2336aed7,41113,1731504886122] 2024-11-13T13:34:47,217 INFO [RS:0;bfeb2336aed7:41113 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T13:34:47,222 INFO [RS:0;bfeb2336aed7:41113 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T13:34:47,222 INFO [RS:0;bfeb2336aed7:41113 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T13:34:47,222 INFO [RS:0;bfeb2336aed7:41113 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-13T13:34:47,222 INFO [RS:0;bfeb2336aed7:41113 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T13:34:47,223 INFO [RS:0;bfeb2336aed7:41113 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T13:34:47,223 INFO [RS:0;bfeb2336aed7:41113 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:47,224 DEBUG [RS:0;bfeb2336aed7:41113 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:47,224 DEBUG [RS:0;bfeb2336aed7:41113 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:47,224 DEBUG [RS:0;bfeb2336aed7:41113 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:47,224 DEBUG [RS:0;bfeb2336aed7:41113 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:47,224 DEBUG [RS:0;bfeb2336aed7:41113 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:47,224 DEBUG [RS:0;bfeb2336aed7:41113 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bfeb2336aed7:0, corePoolSize=2, maxPoolSize=2 2024-11-13T13:34:47,224 DEBUG [RS:0;bfeb2336aed7:41113 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:47,224 DEBUG [RS:0;bfeb2336aed7:41113 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:47,224 DEBUG [RS:0;bfeb2336aed7:41113 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:47,224 DEBUG [RS:0;bfeb2336aed7:41113 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:47,224 DEBUG [RS:0;bfeb2336aed7:41113 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:47,224 DEBUG [RS:0;bfeb2336aed7:41113 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:47,225 DEBUG [RS:0;bfeb2336aed7:41113 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bfeb2336aed7:0, corePoolSize=3, maxPoolSize=3 2024-11-13T13:34:47,225 DEBUG [RS:0;bfeb2336aed7:41113 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0, corePoolSize=3, maxPoolSize=3 2024-11-13T13:34:47,228 INFO [RS:0;bfeb2336aed7:41113 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-13T13:34:47,228 INFO [RS:0;bfeb2336aed7:41113 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:47,228 INFO [RS:0;bfeb2336aed7:41113 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:47,228 INFO [RS:0;bfeb2336aed7:41113 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:47,228 INFO [RS:0;bfeb2336aed7:41113 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:47,228 INFO [RS:0;bfeb2336aed7:41113 {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,41113,1731504886122-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T13:34:47,244 INFO [RS:0;bfeb2336aed7:41113 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T13:34:47,244 INFO [RS:0;bfeb2336aed7:41113 {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,41113,1731504886122-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:47,245 INFO [RS:0;bfeb2336aed7:41113 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:47,245 INFO [RS:0;bfeb2336aed7:41113 {}] regionserver.Replication(171): bfeb2336aed7,41113,1731504886122 started 2024-11-13T13:34:47,260 INFO [RS:0;bfeb2336aed7:41113 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:47,260 INFO [RS:0;bfeb2336aed7:41113 {}] regionserver.HRegionServer(1482): Serving as bfeb2336aed7,41113,1731504886122, RpcServer on bfeb2336aed7/172.17.0.2:41113, sessionid=0x1013467cc540001 2024-11-13T13:34:47,260 DEBUG [RS:0;bfeb2336aed7:41113 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T13:34:47,260 DEBUG [RS:0;bfeb2336aed7:41113 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bfeb2336aed7,41113,1731504886122 2024-11-13T13:34:47,260 DEBUG [RS:0;bfeb2336aed7:41113 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bfeb2336aed7,41113,1731504886122' 2024-11-13T13:34:47,260 DEBUG [RS:0;bfeb2336aed7:41113 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T13:34:47,261 DEBUG [RS:0;bfeb2336aed7:41113 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T13:34:47,262 DEBUG [RS:0;bfeb2336aed7:41113 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T13:34:47,262 DEBUG [RS:0;bfeb2336aed7:41113 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T13:34:47,262 DEBUG [RS:0;bfeb2336aed7:41113 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager bfeb2336aed7,41113,1731504886122 2024-11-13T13:34:47,262 DEBUG [RS:0;bfeb2336aed7:41113 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bfeb2336aed7,41113,1731504886122' 2024-11-13T13:34:47,262 DEBUG [RS:0;bfeb2336aed7:41113 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T13:34:47,263 DEBUG 
[RS:0;bfeb2336aed7:41113 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T13:34:47,263 DEBUG [RS:0;bfeb2336aed7:41113 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T13:34:47,263 INFO [RS:0;bfeb2336aed7:41113 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T13:34:47,263 INFO [RS:0;bfeb2336aed7:41113 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-13T13:34:47,366 INFO [RS:0;bfeb2336aed7:41113 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bfeb2336aed7%2C41113%2C1731504886122, suffix=, logDir=hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/WALs/bfeb2336aed7,41113,1731504886122, archiveDir=hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/oldWALs, maxLogs=32 2024-11-13T13:34:47,368 INFO [RS:0;bfeb2336aed7:41113 {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C41113%2C1731504886122.1731504887368 2024-11-13T13:34:47,375 INFO [RS:0;bfeb2336aed7:41113 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/WALs/bfeb2336aed7,41113,1731504886122/bfeb2336aed7%2C41113%2C1731504886122.1731504887368 2024-11-13T13:34:47,378 DEBUG [RS:0;bfeb2336aed7:41113 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33439:33439),(127.0.0.1/127.0.0.1:37211:37211)] 2024-11-13T13:34:47,549 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:34:47,552 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:34:47,555 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T13:34:47,556 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe 2024-11-13T13:34:47,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741833_1009 (size=32) 2024-11-13T13:34:47,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35491 is added to blk_1073741833_1009 (size=32) 2024-11-13T13:34:47,583 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:34:47,585 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T13:34:47,587 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T13:34:47,588 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:34:47,588 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:34:47,589 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T13:34:47,590 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T13:34:47,591 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:34:47,591 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:34:47,592 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T13:34:47,594 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T13:34:47,594 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:34:47,595 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:34:47,595 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T13:34:47,597 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T13:34:47,597 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:34:47,598 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:34:47,598 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T13:34:47,600 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/data/hbase/meta/1588230740 2024-11-13T13:34:47,600 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/data/hbase/meta/1588230740 2024-11-13T13:34:47,602 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T13:34:47,602 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T13:34:47,603 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-13T13:34:47,612 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T13:34:47,619 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T13:34:47,620 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=773055, jitterRate=-0.017010793089866638}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T13:34:47,621 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731504887583Initializing all the Stores at 1731504887585 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504887585Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504887585Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731504887585Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504887585Cleaning up temporary data from old regions at 1731504887602 (+17 ms)Region opened successfully at 1731504887621 (+19 ms) 2024-11-13T13:34:47,622 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T13:34:47,622 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T13:34:47,622 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T13:34:47,622 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T13:34:47,622 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T13:34:47,623 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T13:34:47,623 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731504887622Disabling compacts and flushes for region at 1731504887622Disabling writes for close at 1731504887622Writing region 
close event to WAL at 1731504887622Closed at 1731504887622 2024-11-13T13:34:47,624 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T13:34:47,625 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T13:34:47,625 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T13:34:47,627 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T13:34:47,628 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T13:34:47,779 DEBUG [bfeb2336aed7:46715 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-13T13:34:47,779 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=bfeb2336aed7,41113,1731504886122 2024-11-13T13:34:47,781 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bfeb2336aed7,41113,1731504886122, state=OPENING 2024-11-13T13:34:47,833 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T13:34:47,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41113-0x1013467cc540001, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:34:47,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:34:47,845 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T13:34:47,845 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T13:34:47,845 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T13:34:47,845 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=bfeb2336aed7,41113,1731504886122}] 2024-11-13T13:34:48,001 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T13:34:48,005 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56325, version=3.0.0-beta-2-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T13:34:48,014 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T13:34:48,014 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T13:34:48,017 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bfeb2336aed7%2C41113%2C1731504886122.meta, suffix=.meta, logDir=hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/WALs/bfeb2336aed7,41113,1731504886122, archiveDir=hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/oldWALs, maxLogs=32 2024-11-13T13:34:48,019 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C41113%2C1731504886122.meta.1731504888019.meta 2024-11-13T13:34:48,025 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/WALs/bfeb2336aed7,41113,1731504886122/bfeb2336aed7%2C41113%2C1731504886122.meta.1731504888019.meta 2024-11-13T13:34:48,026 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33439:33439),(127.0.0.1/127.0.0.1:37211:37211)] 2024-11-13T13:34:48,027 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T13:34:48,028 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T13:34:48,028 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T13:34:48,028 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-13T13:34:48,028 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T13:34:48,028 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:34:48,028 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T13:34:48,028 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T13:34:48,030 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T13:34:48,031 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T13:34:48,031 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:34:48,032 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:34:48,032 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T13:34:48,034 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T13:34:48,034 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:34:48,034 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:34:48,035 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T13:34:48,036 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T13:34:48,036 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:34:48,037 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:34:48,037 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T13:34:48,038 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T13:34:48,038 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:34:48,039 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-13T13:34:48,039 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T13:34:48,040 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/data/hbase/meta/1588230740 2024-11-13T13:34:48,042 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/data/hbase/meta/1588230740 2024-11-13T13:34:48,043 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T13:34:48,043 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T13:34:48,044 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T13:34:48,046 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T13:34:48,047 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=778379, jitterRate=-0.010240137577056885}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T13:34:48,047 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T13:34:48,049 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731504888028Writing region info on filesystem at 1731504888028Initializing all the Stores at 1731504888029 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504888030 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504888030Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731504888030Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504888030Cleaning up temporary data from old regions at 1731504888043 (+13 ms)Running coprocessor post-open hooks at 1731504888047 (+4 ms)Region opened successfully at 1731504888048 (+1 ms) 2024-11-13T13:34:48,050 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731504888001 2024-11-13T13:34:48,053 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T13:34:48,053 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T13:34:48,054 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=bfeb2336aed7,41113,1731504886122 2024-11-13T13:34:48,055 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bfeb2336aed7,41113,1731504886122, state=OPEN 2024-11-13T13:34:48,073 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T13:34:48,079 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:34:48,096 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:34:48,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T13:34:48,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41113-0x1013467cc540001, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T13:34:48,098 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=bfeb2336aed7,41113,1731504886122 2024-11-13T13:34:48,098 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T13:34:48,098 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T13:34:48,102 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T13:34:48,102 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=bfeb2336aed7,41113,1731504886122 in 253 msec 2024-11-13T13:34:48,106 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-13T13:34:48,107 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 477 msec 2024-11-13T13:34:48,108 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T13:34:48,108 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T13:34:48,109 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T13:34:48,109 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bfeb2336aed7,41113,1731504886122, seqNum=-1] 2024-11-13T13:34:48,110 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T13:34:48,112 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59265, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T13:34:48,119 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 986 msec 2024-11-13T13:34:48,120 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731504888120, completionTime=-1 2024-11-13T13:34:48,120 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is 
running 2024-11-13T13:34:48,120 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-13T13:34:48,122 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-13T13:34:48,122 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731504948122 2024-11-13T13:34:48,122 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731505008122 2024-11-13T13:34:48,122 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-13T13:34:48,123 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,46715,1731504885949-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:48,123 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,46715,1731504885949-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:48,123 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,46715,1731504885949-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:48,123 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-bfeb2336aed7:46715, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:48,123 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:48,123 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:48,125 DEBUG [master/bfeb2336aed7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T13:34:48,128 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.926sec 2024-11-13T13:34:48,128 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T13:34:48,128 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T13:34:48,128 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-13T13:34:48,128 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-13T13:34:48,128 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T13:34:48,129 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,46715,1731504885949-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T13:34:48,129 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,46715,1731504885949-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T13:34:48,131 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T13:34:48,132 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T13:34:48,132 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,46715,1731504885949-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:48,161 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47ca969, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T13:34:48,161 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request bfeb2336aed7,46715,-1 for getting cluster id 2024-11-13T13:34:48,161 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T13:34:48,163 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e0a63878-39b5-482f-ae13-e7095e981dc3' 2024-11-13T13:34:48,163 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T13:34:48,164 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e0a63878-39b5-482f-ae13-e7095e981dc3" 2024-11-13T13:34:48,164 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ca9c3be, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T13:34:48,164 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [bfeb2336aed7,46715,-1] 2024-11-13T13:34:48,165 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T13:34:48,165 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:34:48,166 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36654, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T13:34:48,168 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@266531aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T13:34:48,168 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T13:34:48,169 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bfeb2336aed7,41113,1731504886122, seqNum=-1] 2024-11-13T13:34:48,170 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T13:34:48,172 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58608, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T13:34:48,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=bfeb2336aed7,46715,1731504885949 2024-11-13T13:34:48,175 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:34:48,178 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-13T13:34:48,178 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T13:34:48,179 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-13T13:34:48,179 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T13:34:48,179 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:34:48,179 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:34:48,179 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T13:34:48,179 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-13T13:34:48,179 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1385371268, stopped=false 2024-11-13T13:34:48,179 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=bfeb2336aed7,46715,1731504885949 2024-11-13T13:34:48,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T13:34:48,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41113-0x1013467cc540001, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T13:34:48,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:34:48,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41113-0x1013467cc540001, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:34:48,201 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T13:34:48,202 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
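The call stack above is the JUnit teardown path: AbstractTestLogRolling.tearDown() invokes HBaseTestingUtil.shutdownMiniCluster(), which first closes the shared async connection (the "Connection has been closed by Time-limited test" entry) and then stops the master and region servers. A sketch of that start/stop lifecycle, using the class and method names that appear in this log; the StartMiniClusterOption builder calls mirror the option dump printed when the cluster is restarted further down, and the exact builder method names are assumptions.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Mirrors the option dump seen when the minicluster is (re)started below:
    // numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1
    util.startMiniCluster(StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build());
  }

  @After
  public void tearDown() throws Exception {
    // Produces the "Shutting down minicluster" / "Minicluster is down" entries.
    util.shutdownMiniCluster();
  }

  @Test
  public void smoke() throws Exception {
    // Real tests (e.g. TestLogRolling#testLogRollOnDatanodeDeath) exercise the
    // cluster here before tearDown() runs.
  }
}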
2024-11-13T13:34:48,202 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T13:34:48,202 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:34:48,202 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41113-0x1013467cc540001, quorum=127.0.0.1:51842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T13:34:48,202 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T13:34:48,202 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'bfeb2336aed7,41113,1731504886122' ***** 2024-11-13T13:34:48,202 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T13:34:48,203 INFO [RS:0;bfeb2336aed7:41113 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T13:34:48,203 INFO [RS:0;bfeb2336aed7:41113 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T13:34:48,203 INFO [RS:0;bfeb2336aed7:41113 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T13:34:48,203 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T13:34:48,203 INFO [RS:0;bfeb2336aed7:41113 {}] regionserver.HRegionServer(959): stopping server bfeb2336aed7,41113,1731504886122 2024-11-13T13:34:48,203 INFO [RS:0;bfeb2336aed7:41113 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T13:34:48,203 INFO [RS:0;bfeb2336aed7:41113 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;bfeb2336aed7:41113. 2024-11-13T13:34:48,203 DEBUG [RS:0;bfeb2336aed7:41113 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T13:34:48,203 DEBUG [RS:0;bfeb2336aed7:41113 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:34:48,203 INFO [RS:0;bfeb2336aed7:41113 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
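Each of these close journals funnels through AsyncConnectionImpl.close(); from a caller's point of view that is simply the Closeable contract on AsyncConnection. A hedged client-side sketch of acquiring and releasing such a connection (the configuration source, table, and row key are illustrative only):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;

public class AsyncConnectionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // createAsyncConnection returns a CompletableFuture; try-with-resources
    // guarantees the close() call that emits the "Connection has been closed
    // by ..." lines seen in this log.
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      conn.getTable(TableName.META_TABLE_NAME)
          .get(new Get(new byte[] {0}))   // any lightweight request; result may be empty
          .join();
    }
  }
}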
2024-11-13T13:34:48,203 INFO [RS:0;bfeb2336aed7:41113 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T13:34:48,203 INFO [RS:0;bfeb2336aed7:41113 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-13T13:34:48,203 INFO [RS:0;bfeb2336aed7:41113 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T13:34:48,204 INFO [RS:0;bfeb2336aed7:41113 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-13T13:34:48,204 DEBUG [RS:0;bfeb2336aed7:41113 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-13T13:34:48,204 DEBUG [RS:0;bfeb2336aed7:41113 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-13T13:34:48,204 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T13:34:48,204 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T13:34:48,204 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T13:34:48,204 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T13:34:48,204 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T13:34:48,204 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-13T13:34:48,222 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/data/hbase/meta/1588230740/.tmp/ns/91d68234f5244d0ab3bdac61b1145a5e is 43, key is default/ns:d/1731504888112/Put/seqid=0 2024-11-13T13:34:48,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741835_1011 (size=5153) 2024-11-13T13:34:48,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35491 is added to blk_1073741835_1011 (size=5153) 2024-11-13T13:34:48,230 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/data/hbase/meta/1588230740/.tmp/ns/91d68234f5244d0ab3bdac61b1145a5e 2024-11-13T13:34:48,234 INFO [regionserver/bfeb2336aed7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-13T13:34:48,234 INFO [regionserver/bfeb2336aed7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-13T13:34:48,239 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/data/hbase/meta/1588230740/.tmp/ns/91d68234f5244d0ab3bdac61b1145a5e as hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/data/hbase/meta/1588230740/ns/91d68234f5244d0ab3bdac61b1145a5e 2024-11-13T13:34:48,248 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/data/hbase/meta/1588230740/ns/91d68234f5244d0ab3bdac61b1145a5e, entries=2, sequenceid=6, filesize=5.0 K 2024-11-13T13:34:48,249 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 45ms, sequenceid=6, compaction requested=false 2024-11-13T13:34:48,249 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-13T13:34:48,255 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T13:34:48,256 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T13:34:48,256 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T13:34:48,256 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731504888204Running coprocessor pre-close hooks at 1731504888204Disabling compacts and flushes for region at 1731504888204Disabling writes for close at 1731504888204Obtaining lock to block concurrent updates at 1731504888204Preparing flush snapshotting stores in 1588230740 at 1731504888204Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731504888205 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731504888205Flushing 1588230740/ns: creating writer at 1731504888206 (+1 ms)Flushing 1588230740/ns: appending metadata at 1731504888221 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731504888221Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1fed18e1: reopening flushed file at 1731504888238 (+17 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 45ms, sequenceid=6, compaction requested=false at 1731504888249 (+11 ms)Writing region close event to WAL at 1731504888251 (+2 ms)Running coprocessor post-close hooks at 1731504888256 (+5 ms)Closed at 1731504888256 2024-11-13T13:34:48,256 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T13:34:48,404 INFO [RS:0;bfeb2336aed7:41113 {}] regionserver.HRegionServer(976): stopping server bfeb2336aed7,41113,1731504886122; all regions closed. 
2024-11-13T13:34:48,405 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:48,405 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:48,405 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:48,406 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:48,406 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:48,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35491 is added to blk_1073741834_1010 (size=1152) 2024-11-13T13:34:48,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741834_1010 (size=1152) 2024-11-13T13:34:48,415 DEBUG [RS:0;bfeb2336aed7:41113 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/oldWALs 2024-11-13T13:34:48,415 INFO [RS:0;bfeb2336aed7:41113 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bfeb2336aed7%2C41113%2C1731504886122.meta:.meta(num 1731504888019) 2024-11-13T13:34:48,416 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:48,416 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:48,417 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:48,417 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:48,417 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:48,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35491 is added to blk_1073741832_1008 (size=93) 2024-11-13T13:34:48,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741832_1008 (size=93) 2024-11-13T13:34:48,423 DEBUG [RS:0;bfeb2336aed7:41113 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/oldWALs 2024-11-13T13:34:48,423 INFO [RS:0;bfeb2336aed7:41113 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bfeb2336aed7%2C41113%2C1731504886122:(num 1731504887368) 2024-11-13T13:34:48,423 DEBUG [RS:0;bfeb2336aed7:41113 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:34:48,423 INFO [RS:0;bfeb2336aed7:41113 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T13:34:48,423 INFO [RS:0;bfeb2336aed7:41113 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T13:34:48,424 INFO [RS:0;bfeb2336aed7:41113 {}] hbase.ChoreService(370): Chore service for: regionserver/bfeb2336aed7:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-13T13:34:48,424 INFO [RS:0;bfeb2336aed7:41113 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T13:34:48,424 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
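The "Closed WAL: FSHLog ..." and "Moved 1 WAL file(s) to .../oldWALs" entries show the normal end of a WAL's life once it has been rolled and is no longer needed for recovery, which is the behaviour TestLogRolling exercises. A roll can also be requested explicitly through the Admin API; the sketch below assumes the Admin interface in this build exposes getRegionServers() and rollWALWriter(ServerName), and that a reachable cluster is configured.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WalRollSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Ask every live region server to roll its current WAL writer; rolled
      // files that are no longer needed are later archived under oldWALs.
      for (ServerName rs : admin.getRegionServers()) {
        admin.rollWALWriter(rs);
      }
    }
  }
}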
2024-11-13T13:34:48,424 INFO [RS:0;bfeb2336aed7:41113 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41113 2024-11-13T13:34:48,433 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41113-0x1013467cc540001, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bfeb2336aed7,41113,1731504886122 2024-11-13T13:34:48,433 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T13:34:48,433 INFO [RS:0;bfeb2336aed7:41113 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T13:34:48,443 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bfeb2336aed7,41113,1731504886122] 2024-11-13T13:34:48,454 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/bfeb2336aed7,41113,1731504886122 already deleted, retry=false 2024-11-13T13:34:48,454 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; bfeb2336aed7,41113,1731504886122 expired; onlineServers=0 2024-11-13T13:34:48,454 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'bfeb2336aed7,46715,1731504885949' ***** 2024-11-13T13:34:48,454 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-13T13:34:48,455 INFO [M:0;bfeb2336aed7:46715 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T13:34:48,455 INFO [M:0;bfeb2336aed7:46715 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T13:34:48,455 DEBUG [M:0;bfeb2336aed7:46715 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-13T13:34:48,455 DEBUG [M:0;bfeb2336aed7:46715 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T13:34:48,455 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-13T13:34:48,455 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.small.0-1731504887145 {}] cleaner.HFileCleaner(306): Exit Thread[master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.small.0-1731504887145,5,FailOnTimeoutGroup] 2024-11-13T13:34:48,455 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.large.0-1731504887145 {}] cleaner.HFileCleaner(306): Exit Thread[master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.large.0-1731504887145,5,FailOnTimeoutGroup] 2024-11-13T13:34:48,456 INFO [M:0;bfeb2336aed7:46715 {}] hbase.ChoreService(370): Chore service for: master/bfeb2336aed7:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T13:34:48,456 INFO [M:0;bfeb2336aed7:46715 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T13:34:48,456 DEBUG [M:0;bfeb2336aed7:46715 {}] master.HMaster(1795): Stopping service threads 2024-11-13T13:34:48,457 INFO [M:0;bfeb2336aed7:46715 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T13:34:48,457 INFO [M:0;bfeb2336aed7:46715 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T13:34:48,457 INFO [M:0;bfeb2336aed7:46715 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T13:34:48,457 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-13T13:34:48,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T13:34:48,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:34:48,465 DEBUG [M:0;bfeb2336aed7:46715 {}] zookeeper.ZKUtil(347): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-13T13:34:48,465 WARN [M:0;bfeb2336aed7:46715 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-13T13:34:48,465 INFO [M:0;bfeb2336aed7:46715 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/.lastflushedseqids 2024-11-13T13:34:48,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741836_1012 (size=99) 2024-11-13T13:34:48,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35491 is added to blk_1073741836_1012 (size=99) 2024-11-13T13:34:48,474 INFO [M:0;bfeb2336aed7:46715 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T13:34:48,474 INFO [M:0;bfeb2336aed7:46715 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T13:34:48,474 DEBUG [M:0;bfeb2336aed7:46715 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T13:34:48,474 INFO [M:0;bfeb2336aed7:46715 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:34:48,474 DEBUG [M:0;bfeb2336aed7:46715 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:34:48,474 DEBUG [M:0;bfeb2336aed7:46715 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T13:34:48,474 DEBUG [M:0;bfeb2336aed7:46715 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:34:48,474 INFO [M:0;bfeb2336aed7:46715 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-13T13:34:48,499 DEBUG [M:0;bfeb2336aed7:46715 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/03bcab0f971446dbb57051b954387abd is 82, key is hbase:meta,,1/info:regioninfo/1731504888054/Put/seqid=0 2024-11-13T13:34:48,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741837_1013 (size=5672) 2024-11-13T13:34:48,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35491 is added to blk_1073741837_1013 (size=5672) 2024-11-13T13:34:48,507 INFO [M:0;bfeb2336aed7:46715 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/03bcab0f971446dbb57051b954387abd 2024-11-13T13:34:48,544 INFO [RS:0;bfeb2336aed7:41113 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T13:34:48,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41113-0x1013467cc540001, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T13:34:48,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41113-0x1013467cc540001, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T13:34:48,544 INFO [RS:0;bfeb2336aed7:41113 {}] regionserver.HRegionServer(1031): Exiting; stopping=bfeb2336aed7,41113,1731504886122; zookeeper connection closed. 
2024-11-13T13:34:48,544 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3261a6ac {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3261a6ac 2024-11-13T13:34:48,544 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-13T13:34:48,547 DEBUG [M:0;bfeb2336aed7:46715 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7f47c950bed24854bbe2ffb6bcba852b is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731504888118/Put/seqid=0 2024-11-13T13:34:48,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35491 is added to blk_1073741838_1014 (size=5275) 2024-11-13T13:34:48,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741838_1014 (size=5275) 2024-11-13T13:34:48,557 INFO [M:0;bfeb2336aed7:46715 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7f47c950bed24854bbe2ffb6bcba852b 2024-11-13T13:34:48,578 DEBUG [M:0;bfeb2336aed7:46715 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0f809be0b2a64248a44ba0d4c9bcee9c is 69, key is bfeb2336aed7,41113,1731504886122/rs:state/1731504887204/Put/seqid=0 2024-11-13T13:34:48,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741839_1015 (size=5156) 2024-11-13T13:34:48,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35491 is added to blk_1073741839_1015 (size=5156) 2024-11-13T13:34:48,584 INFO [M:0;bfeb2336aed7:46715 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0f809be0b2a64248a44ba0d4c9bcee9c 2024-11-13T13:34:48,605 DEBUG [M:0;bfeb2336aed7:46715 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ee4a88d5f8de4356bfbab99caca8cc22 is 52, key is load_balancer_on/state:d/1731504888177/Put/seqid=0 2024-11-13T13:34:48,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35491 is added to blk_1073741840_1016 (size=5056) 2024-11-13T13:34:48,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45059 is added to blk_1073741840_1016 (size=5056) 2024-11-13T13:34:48,611 INFO [M:0;bfeb2336aed7:46715 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), 
to=hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ee4a88d5f8de4356bfbab99caca8cc22 2024-11-13T13:34:48,618 DEBUG [M:0;bfeb2336aed7:46715 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/03bcab0f971446dbb57051b954387abd as hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/03bcab0f971446dbb57051b954387abd 2024-11-13T13:34:48,623 INFO [M:0;bfeb2336aed7:46715 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/03bcab0f971446dbb57051b954387abd, entries=8, sequenceid=29, filesize=5.5 K 2024-11-13T13:34:48,624 DEBUG [M:0;bfeb2336aed7:46715 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7f47c950bed24854bbe2ffb6bcba852b as hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7f47c950bed24854bbe2ffb6bcba852b 2024-11-13T13:34:48,630 INFO [M:0;bfeb2336aed7:46715 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7f47c950bed24854bbe2ffb6bcba852b, entries=3, sequenceid=29, filesize=5.2 K 2024-11-13T13:34:48,631 DEBUG [M:0;bfeb2336aed7:46715 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0f809be0b2a64248a44ba0d4c9bcee9c as hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0f809be0b2a64248a44ba0d4c9bcee9c 2024-11-13T13:34:48,638 INFO [M:0;bfeb2336aed7:46715 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0f809be0b2a64248a44ba0d4c9bcee9c, entries=1, sequenceid=29, filesize=5.0 K 2024-11-13T13:34:48,639 DEBUG [M:0;bfeb2336aed7:46715 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ee4a88d5f8de4356bfbab99caca8cc22 as hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ee4a88d5f8de4356bfbab99caca8cc22 2024-11-13T13:34:48,646 INFO [M:0;bfeb2336aed7:46715 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46753/user/jenkins/test-data/195d6abf-411a-2b00-93e4-bb00d46a3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ee4a88d5f8de4356bfbab99caca8cc22, entries=1, sequenceid=29, filesize=4.9 K 2024-11-13T13:34:48,648 INFO [M:0;bfeb2336aed7:46715 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 
KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 174ms, sequenceid=29, compaction requested=false 2024-11-13T13:34:48,650 INFO [M:0;bfeb2336aed7:46715 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:34:48,650 DEBUG [M:0;bfeb2336aed7:46715 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731504888474Disabling compacts and flushes for region at 1731504888474Disabling writes for close at 1731504888474Obtaining lock to block concurrent updates at 1731504888474Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731504888474Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731504888475 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731504888476 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731504888476Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731504888498 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731504888498Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731504888519 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731504888546 (+27 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731504888546Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731504888563 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731504888577 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731504888577Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731504888589 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731504888605 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731504888605Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@787ccb75: reopening flushed file at 1731504888617 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4f3ce074: reopening flushed file at 1731504888623 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4b19b043: reopening flushed file at 1731504888630 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@199ba85b: reopening flushed file at 1731504888638 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 174ms, sequenceid=29, compaction requested=false at 1731504888648 (+10 ms)Writing region close event to WAL at 1731504888650 (+2 ms)Closed at 1731504888650 2024-11-13T13:34:48,651 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:48,651 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:48,651 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:48,651 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:48,651 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:34:48,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35491 is added to blk_1073741830_1006 (size=10311) 2024-11-13T13:34:48,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:45059 is added to blk_1073741830_1006 (size=10311) 2024-11-13T13:34:48,654 INFO [M:0;bfeb2336aed7:46715 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-13T13:34:48,654 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T13:34:48,655 INFO [M:0;bfeb2336aed7:46715 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46715 2024-11-13T13:34:48,655 INFO [M:0;bfeb2336aed7:46715 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T13:34:48,765 INFO [M:0;bfeb2336aed7:46715 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T13:34:48,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T13:34:48,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46715-0x1013467cc540000, quorum=127.0.0.1:51842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T13:34:48,804 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@210f4ee9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:34:48,805 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1dedf320{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T13:34:48,805 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T13:34:48,805 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2220be00{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T13:34:48,806 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@17d00685{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/hadoop.log.dir/,STOPPED} 2024-11-13T13:34:48,808 WARN [BP-1458020420-172.17.0.2-1731504883629 heartbeating to localhost/127.0.0.1:46753 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T13:34:48,808 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T13:34:48,808 WARN [BP-1458020420-172.17.0.2-1731504883629 heartbeating to localhost/127.0.0.1:46753 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1458020420-172.17.0.2-1731504883629 (Datanode Uuid 7c0d5e38-08a6-4860-be56-6593de5a6caf) service to localhost/127.0.0.1:46753 2024-11-13T13:34:48,808 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T13:34:48,809 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/cluster_f03ad9ef-4785-3bcf-8cc3-d268983a1bf4/data/data3/current/BP-1458020420-172.17.0.2-1731504883629 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:34:48,810 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/cluster_f03ad9ef-4785-3bcf-8cc3-d268983a1bf4/data/data4/current/BP-1458020420-172.17.0.2-1731504883629 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:34:48,810 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T13:34:48,813 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@426dc4fa{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:34:48,813 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@417f4bff{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T13:34:48,813 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T13:34:48,814 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3b01355c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T13:34:48,814 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21e00560{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/hadoop.log.dir/,STOPPED} 2024-11-13T13:34:48,815 WARN [BP-1458020420-172.17.0.2-1731504883629 heartbeating to localhost/127.0.0.1:46753 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T13:34:48,815 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T13:34:48,815 WARN [BP-1458020420-172.17.0.2-1731504883629 heartbeating to localhost/127.0.0.1:46753 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1458020420-172.17.0.2-1731504883629 (Datanode Uuid 88bcda19-8afb-4636-a539-065f94099559) service to localhost/127.0.0.1:46753 2024-11-13T13:34:48,815 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T13:34:48,816 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/cluster_f03ad9ef-4785-3bcf-8cc3-d268983a1bf4/data/data1/current/BP-1458020420-172.17.0.2-1731504883629 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:34:48,816 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/cluster_f03ad9ef-4785-3bcf-8cc3-d268983a1bf4/data/data2/current/BP-1458020420-172.17.0.2-1731504883629 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:34:48,816 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T13:34:48,822 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@81550dd{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T13:34:48,822 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2d71062d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T13:34:48,822 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T13:34:48,822 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@542ee468{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T13:34:48,823 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2152d149{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/hadoop.log.dir/,STOPPED} 2024-11-13T13:34:48,828 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-13T13:34:48,846 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-13T13:34:48,846 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-13T13:34:48,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/hadoop.log.dir so I do NOT create it in target/test-data/210997d4-e94b-09b4-185f-d9911e45a816 2024-11-13T13:34:48,847 INFO [Time-limited 
test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd39109-2b38-9fe5-bca7-7e0f5100da8a/hadoop.tmp.dir so I do NOT create it in target/test-data/210997d4-e94b-09b4-185f-d9911e45a816 2024-11-13T13:34:48,847 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5, deleteOnExit=true 2024-11-13T13:34:48,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-13T13:34:48,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/test.cache.data in system properties and HBase conf 2024-11-13T13:34:48,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T13:34:48,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/hadoop.log.dir in system properties and HBase conf 2024-11-13T13:34:48,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T13:34:48,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T13:34:48,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T13:34:48,847 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-13T13:34:48,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T13:34:48,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T13:34:48,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T13:34:48,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T13:34:48,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T13:34:48,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T13:34:48,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T13:34:48,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T13:34:48,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-13T13:34:48,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/nfs.dump.dir in system properties and HBase conf 2024-11-13T13:34:48,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/java.io.tmpdir in system properties and HBase conf 2024-11-13T13:34:48,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T13:34:48,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T13:34:48,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T13:34:48,860 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T13:34:49,199 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:34:49,204 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T13:34:49,208 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T13:34:49,208 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T13:34:49,208 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T13:34:49,209 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:34:49,209 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8f6e525{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/hadoop.log.dir/,AVAILABLE} 2024-11-13T13:34:49,210 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@38184680{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T13:34:49,228 INFO [regionserver/bfeb2336aed7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T13:34:49,302 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7eee535{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/java.io.tmpdir/jetty-localhost-44025-hadoop-hdfs-3_4_1-tests_jar-_-any-11887787388184489748/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T13:34:49,303 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@589d0492{HTTP/1.1, (http/1.1)}{localhost:44025} 2024-11-13T13:34:49,303 INFO [Time-limited test {}] server.Server(415): Started @103990ms 2024-11-13T13:34:49,315 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T13:34:49,581 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:34:49,585 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T13:34:49,586 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T13:34:49,586 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T13:34:49,586 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T13:34:49,588 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4549eece{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/hadoop.log.dir/,AVAILABLE} 2024-11-13T13:34:49,588 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c80aceb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T13:34:49,680 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7efd52f6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/java.io.tmpdir/jetty-localhost-43857-hadoop-hdfs-3_4_1-tests_jar-_-any-2881298809029494544/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:34:49,680 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@21f2acf7{HTTP/1.1, (http/1.1)}{localhost:43857} 2024-11-13T13:34:49,680 INFO [Time-limited test {}] server.Server(415): Started @104368ms 2024-11-13T13:34:49,682 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T13:34:49,708 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:34:49,712 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T13:34:49,714 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T13:34:49,714 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T13:34:49,714 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T13:34:49,714 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@d35fd72{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/hadoop.log.dir/,AVAILABLE} 2024-11-13T13:34:49,715 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@401bd933{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T13:34:49,808 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@30e8c2b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/java.io.tmpdir/jetty-localhost-41843-hadoop-hdfs-3_4_1-tests_jar-_-any-1023724956594271491/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:34:49,808 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2dc8ddff{HTTP/1.1, (http/1.1)}{localhost:41843} 2024-11-13T13:34:49,808 INFO [Time-limited test {}] server.Server(415): Started @104496ms 2024-11-13T13:34:49,810 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T13:34:50,807 WARN [Thread-667 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data2/current/BP-1828668861-172.17.0.2-1731504888871/current, will proceed with Du for space computation calculation, 2024-11-13T13:34:50,807 WARN [Thread-666 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data1/current/BP-1828668861-172.17.0.2-1731504888871/current, will proceed with Du for space computation calculation, 2024-11-13T13:34:50,827 WARN [Thread-630 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T13:34:50,830 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc2d1cbd9b2bfd0e7 with lease ID 0x2515494a6e271d73: Processing first storage report for DS-b26c0838-0080-4619-916b-50cda0a016e8 from datanode DatanodeRegistration(127.0.0.1:35467, datanodeUuid=2aebb4ac-4601-45ab-99a7-bb640f705766, infoPort=36115, infoSecurePort=0, ipcPort=38563, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871) 2024-11-13T13:34:50,830 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc2d1cbd9b2bfd0e7 with lease ID 0x2515494a6e271d73: from storage DS-b26c0838-0080-4619-916b-50cda0a016e8 node DatanodeRegistration(127.0.0.1:35467, datanodeUuid=2aebb4ac-4601-45ab-99a7-bb640f705766, infoPort=36115, infoSecurePort=0, ipcPort=38563, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:34:50,830 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc2d1cbd9b2bfd0e7 with lease ID 0x2515494a6e271d73: Processing first storage report for DS-d9b8e07e-e47f-414c-8328-f291c0f8f86b from datanode DatanodeRegistration(127.0.0.1:35467, datanodeUuid=2aebb4ac-4601-45ab-99a7-bb640f705766, infoPort=36115, infoSecurePort=0, ipcPort=38563, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871) 2024-11-13T13:34:50,830 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc2d1cbd9b2bfd0e7 with lease ID 0x2515494a6e271d73: from storage DS-d9b8e07e-e47f-414c-8328-f291c0f8f86b node DatanodeRegistration(127.0.0.1:35467, datanodeUuid=2aebb4ac-4601-45ab-99a7-bb640f705766, infoPort=36115, infoSecurePort=0, ipcPort=38563, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:34:50,938 WARN [Thread-678 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data4/current/BP-1828668861-172.17.0.2-1731504888871/current, will proceed with Du for space computation calculation, 2024-11-13T13:34:50,938 WARN [Thread-677 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data3/current/BP-1828668861-172.17.0.2-1731504888871/current, will proceed with Du for space computation calculation, 2024-11-13T13:34:50,960 WARN [Thread-653 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T13:34:50,963 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xff8326fa1c3cb394 with lease ID 0x2515494a6e271d74: Processing first storage report for DS-077399b2-6c0a-44a1-87c0-773de642c11b from datanode DatanodeRegistration(127.0.0.1:43767, datanodeUuid=e06bd827-5889-475e-8fba-56aa016f4c7b, infoPort=33095, infoSecurePort=0, ipcPort=41595, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871) 2024-11-13T13:34:50,963 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xff8326fa1c3cb394 with lease ID 0x2515494a6e271d74: from storage DS-077399b2-6c0a-44a1-87c0-773de642c11b node DatanodeRegistration(127.0.0.1:43767, datanodeUuid=e06bd827-5889-475e-8fba-56aa016f4c7b, infoPort=33095, infoSecurePort=0, ipcPort=41595, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:34:50,963 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xff8326fa1c3cb394 with lease ID 0x2515494a6e271d74: Processing first storage report for DS-a47574ed-07ff-4670-8b38-e596d474dbf4 from datanode DatanodeRegistration(127.0.0.1:43767, datanodeUuid=e06bd827-5889-475e-8fba-56aa016f4c7b, infoPort=33095, infoSecurePort=0, ipcPort=41595, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871) 2024-11-13T13:34:50,963 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xff8326fa1c3cb394 with lease ID 0x2515494a6e271d74: from storage DS-a47574ed-07ff-4670-8b38-e596d474dbf4 node DatanodeRegistration(127.0.0.1:43767, datanodeUuid=e06bd827-5889-475e-8fba-56aa016f4c7b, infoPort=33095, infoSecurePort=0, ipcPort=41595, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:34:51,051 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816 2024-11-13T13:34:51,055 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/zookeeper_0, clientPort=56840, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T13:34:51,057 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56840 2024-11-13T13:34:51,058 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:34:51,060 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:34:51,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741825_1001 (size=7) 2024-11-13T13:34:51,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35467 is added to blk_1073741825_1001 (size=7) 2024-11-13T13:34:51,072 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239 with version=8 2024-11-13T13:34:51,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/hbase-staging 2024-11-13T13:34:51,075 INFO [Time-limited test {}] client.ConnectionUtils(128): master/bfeb2336aed7:0 server-side Connection retries=45 2024-11-13T13:34:51,075 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T13:34:51,075 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T13:34:51,075 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T13:34:51,075 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T13:34:51,075 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T13:34:51,075 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T13:34:51,076 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T13:34:51,076 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39113 2024-11-13T13:34:51,078 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39113 connecting to ZooKeeper ensemble=127.0.0.1:56840 2024-11-13T13:34:51,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:391130x0, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T13:34:51,137 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39113-0x1013467e0590000 connected 2024-11-13T13:34:51,222 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:34:51,224 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:34:51,227 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T13:34:51,227 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239, hbase.cluster.distributed=false 2024-11-13T13:34:51,229 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T13:34:51,229 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39113 2024-11-13T13:34:51,230 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39113 2024-11-13T13:34:51,230 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39113 2024-11-13T13:34:51,230 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39113 2024-11-13T13:34:51,230 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39113 2024-11-13T13:34:51,245 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/bfeb2336aed7:0 server-side Connection retries=45 2024-11-13T13:34:51,245 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T13:34:51,245 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T13:34:51,245 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T13:34:51,245 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T13:34:51,245 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T13:34:51,245 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T13:34:51,245 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T13:34:51,246 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33523 2024-11-13T13:34:51,247 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33523 connecting to ZooKeeper ensemble=127.0.0.1:56840 2024-11-13T13:34:51,248 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:34:51,250 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:34:51,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:335230x0, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T13:34:51,265 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:335230x0, quorum=127.0.0.1:56840, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T13:34:51,265 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33523-0x1013467e0590001 connected 2024-11-13T13:34:51,265 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T13:34:51,266 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T13:34:51,266 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33523-0x1013467e0590001, quorum=127.0.0.1:56840, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T13:34:51,268 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33523-0x1013467e0590001, quorum=127.0.0.1:56840, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T13:34:51,268 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33523 2024-11-13T13:34:51,268 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33523 2024-11-13T13:34:51,269 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33523 2024-11-13T13:34:51,269 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33523 2024-11-13T13:34:51,269 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33523 2024-11-13T13:34:51,281 DEBUG [M:0;bfeb2336aed7:39113 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;bfeb2336aed7:39113 2024-11-13T13:34:51,281 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/bfeb2336aed7,39113,1731504891075 2024-11-13T13:34:51,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33523-0x1013467e0590001, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T13:34:51,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T13:34:51,295 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/bfeb2336aed7,39113,1731504891075 2024-11-13T13:34:51,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33523-0x1013467e0590001, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T13:34:51,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:34:51,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33523-0x1013467e0590001, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:34:51,307 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T13:34:51,308 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/bfeb2336aed7,39113,1731504891075 from backup master directory 2024-11-13T13:34:51,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33523-0x1013467e0590001, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T13:34:51,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/bfeb2336aed7,39113,1731504891075 2024-11-13T13:34:51,317 WARN [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
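For context, the ZKWatcher/ZKUtil messages above ("Set watcher on znode that does not yet exist", followed by NodeCreated/NodeDeleted events) are the standard ZooKeeper watch pattern: register a watch on a path such as /hbase/master before it exists and react once it is created. Below is a minimal sketch using the plain ZooKeeper client API; the connect string 127.0.0.1:56840 is taken from the log, while the class name and the latch-based wiring are assumptions made only for illustration.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class MasterZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    final CountDownLatch created = new CountDownLatch(1);
    // Default watcher: receives a NodeCreated event once the watched znode appears.
    Watcher watcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeCreated
          && "/hbase/master".equals(event.getPath())) {
        created.countDown();
      }
    };
    ZooKeeper zk = new ZooKeeper("127.0.0.1:56840", 30000, watcher);
    // exists(path, true) is the "Set watcher on znode that does not yet exist" step:
    // a one-shot notification fires when the node is later created.
    if (zk.exists("/hbase/master", true) != null) {
      created.countDown(); // already present, e.g. a master is already active
    }
    created.await();
    zk.close();
  }
}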
2024-11-13T13:34:51,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T13:34:51,317 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=bfeb2336aed7,39113,1731504891075 2024-11-13T13:34:51,322 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/hbase.id] with ID: 16d2b140-ea7f-4b17-a840-ee87a9e64c93 2024-11-13T13:34:51,322 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/.tmp/hbase.id 2024-11-13T13:34:51,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35467 is added to blk_1073741826_1002 (size=42) 2024-11-13T13:34:51,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741826_1002 (size=42) 2024-11-13T13:34:51,329 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/.tmp/hbase.id]:[hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/hbase.id] 2024-11-13T13:34:51,342 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:34:51,342 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-13T13:34:51,344 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
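Everything recorded so far, the mini DFS under target/test-data, the MiniZooKeeperCluster on client port 56840, and the master/region-server start-up, is driven by HBaseTestingUtil. The following is a minimal sketch of a test-style driver that exercises the same path, assuming the usual HBaseTestingUtil methods (startMiniCluster, createTable, shutdownMiniCluster); the table name and column family are invented for the example and do not appear in this log.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    try {
      // Brings up mini DFS, mini ZooKeeper, one master and one region server,
      // writing its state under target/test-data much like the run logged above.
      util.startMiniCluster();
      Table table = util.createTable(TableName.valueOf("sketch"), "cf");
      System.out.println("created " + table.getName());
    } finally {
      util.shutdownMiniCluster();
    }
  }
}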
2024-11-13T13:34:51,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33523-0x1013467e0590001, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:34:51,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:34:51,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741827_1003 (size=196) 2024-11-13T13:34:51,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35467 is added to blk_1073741827_1003 (size=196) 2024-11-13T13:34:51,356 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T13:34:51,357 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T13:34:51,357 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T13:34:51,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741828_1004 (size=1189) 2024-11-13T13:34:51,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35467 is added to blk_1073741828_1004 (size=1189) 2024-11-13T13:34:51,370 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/data/master/store 2024-11-13T13:34:51,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741829_1005 (size=34) 2024-11-13T13:34:51,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35467 is added to blk_1073741829_1005 (size=34) 2024-11-13T13:34:51,378 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:34:51,378 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T13:34:51,378 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:34:51,378 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:34:51,378 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T13:34:51,378 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:34:51,378 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T13:34:51,378 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731504891378Disabling compacts and flushes for region at 1731504891378Disabling writes for close at 1731504891378Writing region close event to WAL at 1731504891378Closed at 1731504891378 2024-11-13T13:34:51,379 WARN [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/data/master/store/.initializing 2024-11-13T13:34:51,379 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/WALs/bfeb2336aed7,39113,1731504891075 2024-11-13T13:34:51,382 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bfeb2336aed7%2C39113%2C1731504891075, suffix=, logDir=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/WALs/bfeb2336aed7,39113,1731504891075, archiveDir=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/oldWALs, maxLogs=10 2024-11-13T13:34:51,382 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C39113%2C1731504891075.1731504891382 2024-11-13T13:34:51,388 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/WALs/bfeb2336aed7,39113,1731504891075/bfeb2336aed7%2C39113%2C1731504891075.1731504891382 2024-11-13T13:34:51,391 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36115:36115),(127.0.0.1/127.0.0.1:33095:33095)] 2024-11-13T13:34:51,391 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T13:34:51,392 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:34:51,392 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:34:51,392 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:34:51,394 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:34:51,395 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T13:34:51,396 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:34:51,396 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:34:51,396 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:34:51,397 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T13:34:51,398 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:34:51,398 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T13:34:51,398 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:34:51,400 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T13:34:51,400 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:34:51,400 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T13:34:51,401 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:34:51,402 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T13:34:51,402 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:34:51,403 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T13:34:51,403 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:34:51,404 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:34:51,404 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:34:51,406 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:34:51,406 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:34:51,407 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T13:34:51,408 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:34:51,410 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T13:34:51,411 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=847417, jitterRate=0.07754673063755035}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T13:34:51,411 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731504891392Initializing all the Stores at 1731504891393 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504891393Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731504891394 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731504891394Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731504891394Cleaning up temporary data from old regions at 1731504891406 (+12 ms)Region opened successfully at 1731504891411 (+5 ms) 2024-11-13T13:34:51,412 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T13:34:51,415 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d661f76, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bfeb2336aed7/172.17.0.2:0 2024-11-13T13:34:51,416 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-13T13:34:51,416 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T13:34:51,416 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T13:34:51,416 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T13:34:51,417 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-13T13:34:51,417 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-13T13:34:51,417 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T13:34:51,419 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-13T13:34:51,420 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T13:34:51,433 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T13:34:51,433 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T13:34:51,434 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T13:34:51,443 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T13:34:51,443 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T13:34:51,445 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T13:34:51,454 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T13:34:51,455 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T13:34:51,464 DEBUG 
[master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T13:34:51,466 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T13:34:51,475 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T13:34:51,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33523-0x1013467e0590001, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T13:34:51,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T13:34:51,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:34:51,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33523-0x1013467e0590001, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:34:51,487 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=bfeb2336aed7,39113,1731504891075, sessionid=0x1013467e0590000, setting cluster-up flag (Was=false) 2024-11-13T13:34:51,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33523-0x1013467e0590001, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:34:51,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:34:51,538 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T13:34:51,540 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bfeb2336aed7,39113,1731504891075 2024-11-13T13:34:51,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33523-0x1013467e0590001, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:34:51,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:34:51,591 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T13:34:51,595 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bfeb2336aed7,39113,1731504891075 2024-11-13T13:34:51,598 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T13:34:51,601 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T13:34:51,602 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T13:34:51,602 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-13T13:34:51,602 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: bfeb2336aed7,39113,1731504891075 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T13:34:51,604 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/bfeb2336aed7:0, corePoolSize=5, maxPoolSize=5 2024-11-13T13:34:51,604 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/bfeb2336aed7:0, corePoolSize=5, maxPoolSize=5 2024-11-13T13:34:51,604 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=5, maxPoolSize=5 2024-11-13T13:34:51,604 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=5, maxPoolSize=5 2024-11-13T13:34:51,604 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/bfeb2336aed7:0, corePoolSize=10, maxPoolSize=10 2024-11-13T13:34:51,604 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:51,604 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=2, maxPoolSize=2 2024-11-13T13:34:51,604 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=1, 
maxPoolSize=1 2024-11-13T13:34:51,606 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731504921606 2024-11-13T13:34:51,606 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T13:34:51,606 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T13:34:51,606 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T13:34:51,606 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T13:34:51,606 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T13:34:51,606 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T13:34:51,607 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T13:34:51,607 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T13:34:51,607 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
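The cleaner machinery initialized above (log_cleaner and hfile_cleaner DirScanPools, the TimeToLive* cleaner delegates, and the LogsCleaner chore running every 600000 ms) is driven by master-side configuration. A minimal sketch, assuming the standard HBase key names for the cleaner interval, old-WAL TTL and cleaner plugin chain; the values simply mirror what this log reports:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CleanerChoreSettings {
      public static Configuration configure() {
        Configuration conf = HBaseConfiguration.create();
        // Period of the LogsCleaner/HFileCleaner chores (600000 ms in the log above).
        conf.setInt("hbase.master.cleaner.interval", 600_000);
        // How long a WAL must sit in oldWALs before TimeToLiveLogCleaner may delete it.
        conf.setLong("hbase.master.logcleaner.ttl", 600_000L);
        // Cleaner delegates chained by CleanerChore; TimeToLiveLogCleaner is the stock one.
        conf.set("hbase.master.logcleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner");
        return conf;
      }
    }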
2024-11-13T13:34:51,607 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T13:34:51,607 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T13:34:51,607 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T13:34:51,608 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:34:51,608 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T13:34:51,609 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T13:34:51,609 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T13:34:51,609 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.large.0-1731504891609,5,FailOnTimeoutGroup] 2024-11-13T13:34:51,611 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.small.0-1731504891610,5,FailOnTimeoutGroup] 2024-11-13T13:34:51,611 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:51,611 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-13T13:34:51,611 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:51,611 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:51,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35467 is added to blk_1073741831_1007 (size=1321) 2024-11-13T13:34:51,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741831_1007 (size=1321) 2024-11-13T13:34:51,617 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T13:34:51,617 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239 2024-11-13T13:34:51,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741832_1008 (size=32) 2024-11-13T13:34:51,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35467 is added to blk_1073741832_1008 (size=32) 2024-11-13T13:34:51,625 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:34:51,626 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T13:34:51,627 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T13:34:51,628 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:34:51,628 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:34:51,628 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T13:34:51,630 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T13:34:51,630 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:34:51,630 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:34:51,630 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T13:34:51,632 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T13:34:51,632 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:34:51,632 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:34:51,632 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T13:34:51,633 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T13:34:51,633 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:34:51,634 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:34:51,634 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T13:34:51,635 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/hbase/meta/1588230740 2024-11-13T13:34:51,635 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/hbase/meta/1588230740 2024-11-13T13:34:51,636 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T13:34:51,636 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T13:34:51,637 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T13:34:51,638 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T13:34:51,640 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T13:34:51,640 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=808442, jitterRate=0.02798798680305481}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T13:34:51,641 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731504891625Initializing all the Stores at 1731504891626 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504891626Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504891626Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731504891626Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504891626Cleaning up temporary data from old regions at 1731504891636 (+10 ms)Region opened successfully at 1731504891641 (+5 ms) 2024-11-13T13:34:51,641 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T13:34:51,642 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T13:34:51,642 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T13:34:51,642 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T13:34:51,642 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T13:34:51,642 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T13:34:51,642 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731504891641Disabling compacts and flushes for region at 
1731504891641Disabling writes for close at 1731504891642 (+1 ms)Writing region close event to WAL at 1731504891642Closed at 1731504891642 2024-11-13T13:34:51,644 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T13:34:51,644 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T13:34:51,644 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T13:34:51,645 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T13:34:51,647 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T13:34:51,672 INFO [RS:0;bfeb2336aed7:33523 {}] regionserver.HRegionServer(746): ClusterId : 16d2b140-ea7f-4b17-a840-ee87a9e64c93 2024-11-13T13:34:51,672 DEBUG [RS:0;bfeb2336aed7:33523 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T13:34:51,687 DEBUG [RS:0;bfeb2336aed7:33523 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T13:34:51,687 DEBUG [RS:0;bfeb2336aed7:33523 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T13:34:51,698 DEBUG [RS:0;bfeb2336aed7:33523 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T13:34:51,699 DEBUG [RS:0;bfeb2336aed7:33523 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e32cae8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bfeb2336aed7/172.17.0.2:0 2024-11-13T13:34:51,715 DEBUG [RS:0;bfeb2336aed7:33523 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;bfeb2336aed7:33523 2024-11-13T13:34:51,715 INFO [RS:0;bfeb2336aed7:33523 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T13:34:51,715 INFO [RS:0;bfeb2336aed7:33523 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T13:34:51,715 DEBUG [RS:0;bfeb2336aed7:33523 {}] regionserver.HRegionServer(832): About to register with Master. 
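The "Time-limited test" threads and the jenkins test-data paths indicate this master and region server belong to an in-process mini cluster started by a unit test. A minimal sketch of how such a cluster is typically brought up, assuming the HBaseTestingUtility class name from the 2.x line (3.0 snapshots ship an equivalent utility under a slightly different name):

    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class MiniClusterExample {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        // Starts an in-process ZooKeeper, a mini HDFS cluster, one HMaster and one
        // HRegionServer, which is the topology the surrounding log shows.
        util.startMiniCluster(1);
        try {
          // ... run test logic against util.getConnection() ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }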
2024-11-13T13:34:51,716 INFO [RS:0;bfeb2336aed7:33523 {}] regionserver.HRegionServer(2659): reportForDuty to master=bfeb2336aed7,39113,1731504891075 with port=33523, startcode=1731504891245 2024-11-13T13:34:51,716 DEBUG [RS:0;bfeb2336aed7:33523 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T13:34:51,718 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41137, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T13:34:51,719 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39113 {}] master.ServerManager(363): Checking decommissioned status of RegionServer bfeb2336aed7,33523,1731504891245 2024-11-13T13:34:51,719 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39113 {}] master.ServerManager(517): Registering regionserver=bfeb2336aed7,33523,1731504891245 2024-11-13T13:34:51,720 DEBUG [RS:0;bfeb2336aed7:33523 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239 2024-11-13T13:34:51,720 DEBUG [RS:0;bfeb2336aed7:33523 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40141 2024-11-13T13:34:51,720 DEBUG [RS:0;bfeb2336aed7:33523 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T13:34:51,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T13:34:51,728 DEBUG [RS:0;bfeb2336aed7:33523 {}] zookeeper.ZKUtil(111): regionserver:33523-0x1013467e0590001, quorum=127.0.0.1:56840, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bfeb2336aed7,33523,1731504891245 2024-11-13T13:34:51,728 WARN [RS:0;bfeb2336aed7:33523 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T13:34:51,728 INFO [RS:0;bfeb2336aed7:33523 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T13:34:51,728 DEBUG [RS:0;bfeb2336aed7:33523 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245 2024-11-13T13:34:51,728 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bfeb2336aed7,33523,1731504891245] 2024-11-13T13:34:51,752 INFO [RS:0;bfeb2336aed7:33523 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T13:34:51,754 INFO [RS:0;bfeb2336aed7:33523 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T13:34:51,754 INFO [RS:0;bfeb2336aed7:33523 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T13:34:51,754 INFO [RS:0;bfeb2336aed7:33523 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
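The MemStoreFlusher limits reported above (globalMemStoreLimit=880 M, low mark 836 M) and the pressure-aware compaction throughput bounds (50.00-100.00 MB/second, retuned every 60000 ms) come from region-server configuration. A minimal sketch, assuming the standard key names; the fractions are chosen to reproduce the ratio the log reports (836 M is roughly 0.95 of 880 M):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class FlushAndCompactionSettings {
      public static Configuration configure() {
        Configuration conf = HBaseConfiguration.create();
        // Upper bound for all memstores as a fraction of region-server heap.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Low-water mark as a fraction of the upper bound (0.95 * 880 M ~= 836 M).
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        // PressureAwareCompactionThroughputController bounds, in bytes per second.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        return conf;
      }
    }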
2024-11-13T13:34:51,755 INFO [RS:0;bfeb2336aed7:33523 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T13:34:51,756 INFO [RS:0;bfeb2336aed7:33523 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T13:34:51,756 INFO [RS:0;bfeb2336aed7:33523 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:51,756 DEBUG [RS:0;bfeb2336aed7:33523 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:51,756 DEBUG [RS:0;bfeb2336aed7:33523 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:51,756 DEBUG [RS:0;bfeb2336aed7:33523 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:51,756 DEBUG [RS:0;bfeb2336aed7:33523 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:51,756 DEBUG [RS:0;bfeb2336aed7:33523 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:51,757 DEBUG [RS:0;bfeb2336aed7:33523 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bfeb2336aed7:0, corePoolSize=2, maxPoolSize=2 2024-11-13T13:34:51,757 DEBUG [RS:0;bfeb2336aed7:33523 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:51,757 DEBUG [RS:0;bfeb2336aed7:33523 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:51,757 DEBUG [RS:0;bfeb2336aed7:33523 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:51,757 DEBUG [RS:0;bfeb2336aed7:33523 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:51,757 DEBUG [RS:0;bfeb2336aed7:33523 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:51,757 DEBUG [RS:0;bfeb2336aed7:33523 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:51,757 DEBUG [RS:0;bfeb2336aed7:33523 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bfeb2336aed7:0, corePoolSize=3, maxPoolSize=3 2024-11-13T13:34:51,757 DEBUG [RS:0;bfeb2336aed7:33523 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0, corePoolSize=3, maxPoolSize=3 2024-11-13T13:34:51,757 INFO [RS:0;bfeb2336aed7:33523 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-13T13:34:51,757 INFO [RS:0;bfeb2336aed7:33523 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:51,757 INFO [RS:0;bfeb2336aed7:33523 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:51,758 INFO [RS:0;bfeb2336aed7:33523 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:51,758 INFO [RS:0;bfeb2336aed7:33523 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:51,758 INFO [RS:0;bfeb2336aed7:33523 {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,33523,1731504891245-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T13:34:51,772 INFO [RS:0;bfeb2336aed7:33523 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T13:34:51,772 INFO [RS:0;bfeb2336aed7:33523 {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,33523,1731504891245-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:51,772 INFO [RS:0;bfeb2336aed7:33523 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:51,773 INFO [RS:0;bfeb2336aed7:33523 {}] regionserver.Replication(171): bfeb2336aed7,33523,1731504891245 started 2024-11-13T13:34:51,787 INFO [RS:0;bfeb2336aed7:33523 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:51,787 INFO [RS:0;bfeb2336aed7:33523 {}] regionserver.HRegionServer(1482): Serving as bfeb2336aed7,33523,1731504891245, RpcServer on bfeb2336aed7/172.17.0.2:33523, sessionid=0x1013467e0590001 2024-11-13T13:34:51,787 DEBUG [RS:0;bfeb2336aed7:33523 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T13:34:51,787 DEBUG [RS:0;bfeb2336aed7:33523 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bfeb2336aed7,33523,1731504891245 2024-11-13T13:34:51,787 DEBUG [RS:0;bfeb2336aed7:33523 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bfeb2336aed7,33523,1731504891245' 2024-11-13T13:34:51,787 DEBUG [RS:0;bfeb2336aed7:33523 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T13:34:51,788 DEBUG [RS:0;bfeb2336aed7:33523 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T13:34:51,789 DEBUG [RS:0;bfeb2336aed7:33523 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T13:34:51,789 DEBUG [RS:0;bfeb2336aed7:33523 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T13:34:51,789 DEBUG [RS:0;bfeb2336aed7:33523 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager bfeb2336aed7,33523,1731504891245 2024-11-13T13:34:51,789 DEBUG [RS:0;bfeb2336aed7:33523 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bfeb2336aed7,33523,1731504891245' 2024-11-13T13:34:51,789 DEBUG [RS:0;bfeb2336aed7:33523 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T13:34:51,789 DEBUG 
[RS:0;bfeb2336aed7:33523 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T13:34:51,790 DEBUG [RS:0;bfeb2336aed7:33523 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T13:34:51,790 INFO [RS:0;bfeb2336aed7:33523 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T13:34:51,790 INFO [RS:0;bfeb2336aed7:33523 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-13T13:34:51,797 WARN [bfeb2336aed7:39113 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-13T13:34:51,892 INFO [RS:0;bfeb2336aed7:33523 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bfeb2336aed7%2C33523%2C1731504891245, suffix=, logDir=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245, archiveDir=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/oldWALs, maxLogs=32 2024-11-13T13:34:51,893 INFO [RS:0;bfeb2336aed7:33523 {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C33523%2C1731504891245.1731504891893 2024-11-13T13:34:51,900 INFO [RS:0;bfeb2336aed7:33523 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504891893 2024-11-13T13:34:51,903 DEBUG [RS:0;bfeb2336aed7:33523 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36115:36115),(127.0.0.1/127.0.0.1:33095:33095)] 2024-11-13T13:34:51,948 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T13:34:51,948 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T13:34:51,948 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-13T13:34:52,047 DEBUG [bfeb2336aed7:39113 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-13T13:34:52,048 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=bfeb2336aed7,33523,1731504891245 2024-11-13T13:34:52,051 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bfeb2336aed7,33523,1731504891245, state=OPENING 2024-11-13T13:34:52,075 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T13:34:52,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33523-0x1013467e0590001, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:34:52,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
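The FSHLog WAL provider above is created with blocksize=256 MB, rollsize=128 MB and maxLogs=32. A minimal sketch of configuration that yields those numbers, assuming the standard WAL key names (the roll size is the block size times the roll multiplier):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalSettings {
      public static Configuration configure() {
        Configuration conf = HBaseConfiguration.create();
        // WAL block size; 256 MB * 0.5 multiplier gives the 128 MB roll size in the log.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // Number of WAL files allowed before the oldest regions are force-flushed.
        conf.setInt("hbase.regionserver.maxlogs", 32);
        return conf;
      }
    }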
2024-11-13T13:34:52,086 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T13:34:52,086 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T13:34:52,086 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T13:34:52,086 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=bfeb2336aed7,33523,1731504891245}] 2024-11-13T13:34:52,242 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T13:34:52,246 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45351, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T13:34:52,254 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T13:34:52,255 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T13:34:52,257 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bfeb2336aed7%2C33523%2C1731504891245.meta, suffix=.meta, logDir=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245, archiveDir=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/oldWALs, maxLogs=32 2024-11-13T13:34:52,258 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta 2024-11-13T13:34:52,265 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta 2024-11-13T13:34:52,267 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36115:36115),(127.0.0.1/127.0.0.1:33095:33095)] 2024-11-13T13:34:52,268 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T13:34:52,269 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T13:34:52,269 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered 
coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T13:34:52,269 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-13T13:34:52,269 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T13:34:52,269 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:34:52,270 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T13:34:52,270 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T13:34:52,272 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T13:34:52,273 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T13:34:52,273 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:34:52,274 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:34:52,274 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T13:34:52,275 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy 
for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T13:34:52,275 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:34:52,276 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:34:52,276 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T13:34:52,277 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T13:34:52,277 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:34:52,278 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:34:52,278 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T13:34:52,279 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T13:34:52,279 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:34:52,280 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:34:52,280 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T13:34:52,281 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/hbase/meta/1588230740 2024-11-13T13:34:52,283 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/hbase/meta/1588230740 2024-11-13T13:34:52,284 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T13:34:52,285 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T13:34:52,285 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T13:34:52,286 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T13:34:52,287 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=839547, jitterRate=0.0675395131111145}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T13:34:52,287 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T13:34:52,287 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731504892270Writing region info on filesystem at 1731504892270Initializing all the Stores at 1731504892272 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504892272Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B 
(8KB)'} at 1731504892272Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731504892272Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504892272Cleaning up temporary data from old regions at 1731504892285 (+13 ms)Running coprocessor post-open hooks at 1731504892287 (+2 ms)Region opened successfully at 1731504892287 2024-11-13T13:34:52,289 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731504892241 2024-11-13T13:34:52,291 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T13:34:52,291 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T13:34:52,292 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=bfeb2336aed7,33523,1731504891245 2024-11-13T13:34:52,294 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bfeb2336aed7,33523,1731504891245, state=OPEN 2024-11-13T13:34:52,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33523-0x1013467e0590001, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T13:34:52,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T13:34:52,380 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=bfeb2336aed7,33523,1731504891245 2024-11-13T13:34:52,380 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T13:34:52,380 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T13:34:52,383 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T13:34:52,383 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=bfeb2336aed7,33523,1731504891245 in 294 msec 2024-11-13T13:34:52,386 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, 
resume processing ppid=1 2024-11-13T13:34:52,386 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 739 msec 2024-11-13T13:34:52,387 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T13:34:52,387 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T13:34:52,388 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T13:34:52,388 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bfeb2336aed7,33523,1731504891245, seqNum=-1] 2024-11-13T13:34:52,389 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T13:34:52,390 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54067, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T13:34:52,396 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 795 msec 2024-11-13T13:34:52,396 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731504892396, completionTime=-1 2024-11-13T13:34:52,397 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-13T13:34:52,397 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-13T13:34:52,399 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-13T13:34:52,399 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731504952399 2024-11-13T13:34:52,399 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731505012399 2024-11-13T13:34:52,399 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-13T13:34:52,399 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,39113,1731504891075-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:52,399 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,39113,1731504891075-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:52,399 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,39113,1731504891075-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T13:34:52,399 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-bfeb2336aed7:39113, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:52,399 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:52,399 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:52,401 DEBUG [master/bfeb2336aed7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T13:34:52,403 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.086sec 2024-11-13T13:34:52,403 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T13:34:52,403 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T13:34:52,403 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-13T13:34:52,403 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-13T13:34:52,404 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T13:34:52,404 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,39113,1731504891075-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T13:34:52,404 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,39113,1731504891075-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T13:34:52,406 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T13:34:52,407 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T13:34:52,407 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,39113,1731504891075-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
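At this point the master has finished InitMetaProcedure, confirmed that the single expected RegionServer has reported in, and scheduled its background chores, so admin operations can be issued against the cluster. A minimal sketch of how a client could verify that state, assuming the standard HBase 2.x+ Admin API; the connection bootstrap and the polling loop are illustrative, not the test's own code:

```java
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class WaitForRegionServers {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Poll until at least one RegionServer is live, mirroring the
      // "Finished waiting on RegionServer count=1" line above.
      while (true) {
        ClusterMetrics metrics = admin.getClusterMetrics();
        if (metrics.getLiveServerMetrics().size() >= 1) {
          break;
        }
        Thread.sleep(100);
      }
    }
  }
}
```

The later sketches below reuse this `conn`/`admin` pair rather than repeating the bootstrap.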
2024-11-13T13:34:52,472 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19f5a74a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T13:34:52,472 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request bfeb2336aed7,39113,-1 for getting cluster id 2024-11-13T13:34:52,473 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T13:34:52,475 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '16d2b140-ea7f-4b17-a840-ee87a9e64c93' 2024-11-13T13:34:52,476 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T13:34:52,476 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "16d2b140-ea7f-4b17-a840-ee87a9e64c93" 2024-11-13T13:34:52,477 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a72ea29, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T13:34:52,477 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [bfeb2336aed7,39113,-1] 2024-11-13T13:34:52,477 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T13:34:52,478 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:34:52,481 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37514, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T13:34:52,482 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@706e1536, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T13:34:52,482 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T13:34:52,483 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bfeb2336aed7,33523,1731504891245, seqNum=-1] 2024-11-13T13:34:52,484 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T13:34:52,485 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44852, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T13:34:52,487 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=bfeb2336aed7,39113,1731504891075 2024-11-13T13:34:52,487 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:34:52,490 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-13T13:34:52,507 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/bfeb2336aed7:0 server-side Connection retries=45 2024-11-13T13:34:52,507 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T13:34:52,507 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T13:34:52,507 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T13:34:52,507 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T13:34:52,507 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T13:34:52,507 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T13:34:52,508 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T13:34:52,508 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38781 2024-11-13T13:34:52,509 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38781 connecting to ZooKeeper ensemble=127.0.0.1:56840 2024-11-13T13:34:52,510 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:34:52,511 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:34:52,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:387810x0, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T13:34:52,539 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38781-0x1013467e0590002 connected 2024-11-13T13:34:52,539 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:38781-0x1013467e0590002, quorum=127.0.0.1:56840, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-13T13:34:52,539 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-13T13:34:52,540 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T13:34:52,541 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
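The "Client=null/null set balanceSwitch=false" entry above is the test disabling the load balancer right after the mini cluster comes up, presumably so the master does not move regions around while datanodes are later killed. A hedged sketch of the equivalent client call, reusing the `admin` handle from the earlier sketch:

```java
// Disable the balancer; the second argument asks the master to wait for any
// in-flight balance run before returning. Returns the previous on/off state.
boolean wasEnabled = admin.balancerSwitch(false, true);
```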
2024-11-13T13:34:52,542 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:38781-0x1013467e0590002, quorum=127.0.0.1:56840, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T13:34:52,544 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38781-0x1013467e0590002, quorum=127.0.0.1:56840, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T13:34:52,545 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38781 2024-11-13T13:34:52,548 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38781 2024-11-13T13:34:52,548 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38781 2024-11-13T13:34:52,551 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38781 2024-11-13T13:34:52,551 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38781 2024-11-13T13:34:52,553 INFO [RS:1;bfeb2336aed7:38781 {}] regionserver.HRegionServer(746): ClusterId : 16d2b140-ea7f-4b17-a840-ee87a9e64c93 2024-11-13T13:34:52,554 DEBUG [RS:1;bfeb2336aed7:38781 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T13:34:52,560 DEBUG [RS:1;bfeb2336aed7:38781 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T13:34:52,560 DEBUG [RS:1;bfeb2336aed7:38781 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T13:34:52,571 DEBUG [RS:1;bfeb2336aed7:38781 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T13:34:52,572 DEBUG [RS:1;bfeb2336aed7:38781 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c56f5f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bfeb2336aed7/172.17.0.2:0 2024-11-13T13:34:52,584 DEBUG [RS:1;bfeb2336aed7:38781 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;bfeb2336aed7:38781 2024-11-13T13:34:52,584 INFO [RS:1;bfeb2336aed7:38781 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T13:34:52,584 INFO [RS:1;bfeb2336aed7:38781 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T13:34:52,584 DEBUG [RS:1;bfeb2336aed7:38781 {}] regionserver.HRegionServer(832): About to register with Master. 
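The RpcExecutor lines above ("Started handlerCount=3 ... numCallQueues=1, port=38781") show the deliberately small IPC handler pools this RegionServer is brought up with. A sketch of the configuration keys that control those pools; the keys are standard HBase settings, and the values are simply read off the log (illustrative only):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

Configuration conf = HBaseConfiguration.create();
// Three default-queue handlers, matching "handlerCount=3" above.
conf.setInt("hbase.regionserver.handler.count", 3);
// maxQueueLength=30 in the log is the default of 10 queued calls per handler;
// it can be overridden explicitly with this key if needed.
conf.setInt("hbase.ipc.server.max.callqueue.length", 30);
```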
2024-11-13T13:34:52,585 INFO [RS:1;bfeb2336aed7:38781 {}] regionserver.HRegionServer(2659): reportForDuty to master=bfeb2336aed7,39113,1731504891075 with port=38781, startcode=1731504892507 2024-11-13T13:34:52,585 DEBUG [RS:1;bfeb2336aed7:38781 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T13:34:52,587 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38217, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T13:34:52,588 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39113 {}] master.ServerManager(363): Checking decommissioned status of RegionServer bfeb2336aed7,38781,1731504892507 2024-11-13T13:34:52,588 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39113 {}] master.ServerManager(517): Registering regionserver=bfeb2336aed7,38781,1731504892507 2024-11-13T13:34:52,590 DEBUG [RS:1;bfeb2336aed7:38781 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239 2024-11-13T13:34:52,590 DEBUG [RS:1;bfeb2336aed7:38781 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40141 2024-11-13T13:34:52,590 DEBUG [RS:1;bfeb2336aed7:38781 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T13:34:52,601 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T13:34:52,601 DEBUG [RS:1;bfeb2336aed7:38781 {}] zookeeper.ZKUtil(111): regionserver:38781-0x1013467e0590002, quorum=127.0.0.1:56840, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bfeb2336aed7,38781,1731504892507 2024-11-13T13:34:52,602 WARN [RS:1;bfeb2336aed7:38781 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T13:34:52,602 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bfeb2336aed7,38781,1731504892507] 2024-11-13T13:34:52,602 INFO [RS:1;bfeb2336aed7:38781 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T13:34:52,602 DEBUG [RS:1;bfeb2336aed7:38781 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507 2024-11-13T13:34:52,606 INFO [RS:1;bfeb2336aed7:38781 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T13:34:52,608 INFO [RS:1;bfeb2336aed7:38781 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T13:34:52,608 INFO [RS:1;bfeb2336aed7:38781 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T13:34:52,608 INFO [RS:1;bfeb2336aed7:38781 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
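The RegionServer startup above picks an FSHLogProvider-backed WAL and sizes the global memstore at 880 MB with an 836 MB low-water mark. A hedged sketch of the two configuration keys behind those lines; the fraction shown is illustrative, since the absolute limits in the log are derived from the test JVM's heap size, which is not visible here:

```java
// "filesystem" selects FSHLogProvider, matching "Instantiating WALProvider of
// type class org.apache.hadoop.hbase.wal.FSHLogProvider" above.
conf.set("hbase.wal.provider", "filesystem");
// Fraction of the heap reserved for all memstores on the server; the
// 880 MB / 836 MB figures are this fraction (and its low-water companion)
// applied to the heap.
conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
```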
2024-11-13T13:34:52,608 INFO [RS:1;bfeb2336aed7:38781 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T13:34:52,609 INFO [RS:1;bfeb2336aed7:38781 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T13:34:52,610 INFO [RS:1;bfeb2336aed7:38781 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:52,610 DEBUG [RS:1;bfeb2336aed7:38781 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:52,610 DEBUG [RS:1;bfeb2336aed7:38781 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:52,610 DEBUG [RS:1;bfeb2336aed7:38781 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:52,610 DEBUG [RS:1;bfeb2336aed7:38781 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:52,610 DEBUG [RS:1;bfeb2336aed7:38781 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:52,610 DEBUG [RS:1;bfeb2336aed7:38781 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bfeb2336aed7:0, corePoolSize=2, maxPoolSize=2 2024-11-13T13:34:52,610 DEBUG [RS:1;bfeb2336aed7:38781 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:52,610 DEBUG [RS:1;bfeb2336aed7:38781 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:52,611 DEBUG [RS:1;bfeb2336aed7:38781 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:52,611 DEBUG [RS:1;bfeb2336aed7:38781 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:52,611 DEBUG [RS:1;bfeb2336aed7:38781 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:52,611 DEBUG [RS:1;bfeb2336aed7:38781 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:34:52,611 DEBUG [RS:1;bfeb2336aed7:38781 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bfeb2336aed7:0, corePoolSize=3, maxPoolSize=3 2024-11-13T13:34:52,611 DEBUG [RS:1;bfeb2336aed7:38781 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0, corePoolSize=3, maxPoolSize=3 2024-11-13T13:34:52,612 INFO [RS:1;bfeb2336aed7:38781 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-13T13:34:52,612 INFO [RS:1;bfeb2336aed7:38781 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:52,612 INFO [RS:1;bfeb2336aed7:38781 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:52,612 INFO [RS:1;bfeb2336aed7:38781 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:52,612 INFO [RS:1;bfeb2336aed7:38781 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:52,612 INFO [RS:1;bfeb2336aed7:38781 {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,38781,1731504892507-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T13:34:52,628 INFO [RS:1;bfeb2336aed7:38781 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T13:34:52,628 INFO [RS:1;bfeb2336aed7:38781 {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,38781,1731504892507-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:52,628 INFO [RS:1;bfeb2336aed7:38781 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:52,628 INFO [RS:1;bfeb2336aed7:38781 {}] regionserver.Replication(171): bfeb2336aed7,38781,1731504892507 started 2024-11-13T13:34:52,641 INFO [RS:1;bfeb2336aed7:38781 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:34:52,642 INFO [RS:1;bfeb2336aed7:38781 {}] regionserver.HRegionServer(1482): Serving as bfeb2336aed7,38781,1731504892507, RpcServer on bfeb2336aed7/172.17.0.2:38781, sessionid=0x1013467e0590002 2024-11-13T13:34:52,642 DEBUG [RS:1;bfeb2336aed7:38781 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T13:34:52,642 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;bfeb2336aed7:38781,5,FailOnTimeoutGroup] 2024-11-13T13:34:52,642 DEBUG [RS:1;bfeb2336aed7:38781 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bfeb2336aed7,38781,1731504892507 2024-11-13T13:34:52,642 DEBUG [RS:1;bfeb2336aed7:38781 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bfeb2336aed7,38781,1731504892507' 2024-11-13T13:34:52,642 DEBUG [RS:1;bfeb2336aed7:38781 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T13:34:52,642 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-13T13:34:52,643 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-13T13:34:52,643 DEBUG [RS:1;bfeb2336aed7:38781 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T13:34:52,643 DEBUG [RS:1;bfeb2336aed7:38781 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T13:34:52,643 DEBUG [RS:1;bfeb2336aed7:38781 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T13:34:52,643 DEBUG [RS:1;bfeb2336aed7:38781 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
bfeb2336aed7,38781,1731504892507 2024-11-13T13:34:52,643 DEBUG [RS:1;bfeb2336aed7:38781 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bfeb2336aed7,38781,1731504892507' 2024-11-13T13:34:52,643 DEBUG [RS:1;bfeb2336aed7:38781 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T13:34:52,644 DEBUG [RS:1;bfeb2336aed7:38781 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T13:34:52,644 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is bfeb2336aed7,39113,1731504891075 2024-11-13T13:34:52,644 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@ee1d7f6 2024-11-13T13:34:52,644 DEBUG [RS:1;bfeb2336aed7:38781 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T13:34:52,644 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-13T13:34:52,644 INFO [RS:1;bfeb2336aed7:38781 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T13:34:52,644 INFO [RS:1;bfeb2336aed7:38781 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-13T13:34:52,646 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37524, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-13T13:34:52,646 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39113 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-13T13:34:52,646 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39113 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-13T13:34:52,647 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39113 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T13:34:52,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39113 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-13T13:34:52,650 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T13:34:52,650 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:34:52,650 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39113 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-13T13:34:52,651 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T13:34:52,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39113 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T13:34:52,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741835_1011 (size=393) 2024-11-13T13:34:52,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35467 is added to blk_1073741835_1011 (size=393) 2024-11-13T13:34:52,661 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 791221fd56c9a0e65b7eca684d5a205f, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239 2024-11-13T13:34:52,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35467 is added to blk_1073741836_1012 (size=76) 2024-11-13T13:34:52,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43767 is added to blk_1073741836_1012 (size=76) 2024-11-13T13:34:52,668 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:34:52,669 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 791221fd56c9a0e65b7eca684d5a205f, disabling compactions & flushes 2024-11-13T13:34:52,669 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f. 2024-11-13T13:34:52,669 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f. 2024-11-13T13:34:52,669 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f. after waiting 0 ms 2024-11-13T13:34:52,669 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f. 2024-11-13T13:34:52,669 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f. 2024-11-13T13:34:52,669 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 791221fd56c9a0e65b7eca684d5a205f: Waiting for close lock at 1731504892668Disabling compacts and flushes for region at 1731504892668Disabling writes for close at 1731504892669 (+1 ms)Writing region close event to WAL at 1731504892669Closed at 1731504892669 2024-11-13T13:34:52,671 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T13:34:52,672 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731504892671"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731504892671"}]},"ts":"1731504892671"} 2024-11-13T13:34:52,675 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
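The create-table request logged above (HMaster$4 "create"), together with the TableDescriptorChecker warnings a few lines earlier, shows a single 'info' family with one version, a ROW bloom filter and 64 KB blocks, and an effective MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) that are deliberately tiny — whether set on the descriptor or via "hbase.hregion.max.filesize" / "hbase.hregion.memstore.flush.size" — so splits and flushes trigger quickly during the test. A sketch of an equivalent client-side request that sets them on the descriptor, assuming the HBase 2.x+ TableDescriptorBuilder API and the `admin` handle from the first sketch:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

TableDescriptor desc = TableDescriptorBuilder
    .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(1)                  // VERSIONS => '1'
        .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
        .setBlocksize(64 * 1024)            // BLOCKSIZE => '65536 B (64KB)'
        .build())
    .setMaxFileSize(786432)        // would trigger the MAX_FILESIZE warning above
    .setMemStoreFlushSize(8192)    // would trigger the MEMSTORE_FLUSHSIZE warning above
    .build();
admin.createTable(desc);
```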
2024-11-13T13:34:52,677 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T13:34:52,678 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731504892677"}]},"ts":"1731504892677"} 2024-11-13T13:34:52,681 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-13T13:34:52,681 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=791221fd56c9a0e65b7eca684d5a205f, ASSIGN}] 2024-11-13T13:34:52,683 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=791221fd56c9a0e65b7eca684d5a205f, ASSIGN 2024-11-13T13:34:52,685 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=791221fd56c9a0e65b7eca684d5a205f, ASSIGN; state=OFFLINE, location=bfeb2336aed7,33523,1731504891245; forceNewPlan=false, retain=false 2024-11-13T13:34:52,746 INFO [RS:1;bfeb2336aed7:38781 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bfeb2336aed7%2C38781%2C1731504892507, suffix=, logDir=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507, archiveDir=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/oldWALs, maxLogs=32 2024-11-13T13:34:52,747 INFO [RS:1;bfeb2336aed7:38781 {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C38781%2C1731504892507.1731504892747 2024-11-13T13:34:52,753 INFO [RS:1;bfeb2336aed7:38781 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 2024-11-13T13:34:52,754 DEBUG [RS:1;bfeb2336aed7:38781 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33095:33095),(127.0.0.1/127.0.0.1:36115:36115)] 2024-11-13T13:34:52,836 INFO [bfeb2336aed7:39113 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-13T13:34:52,836 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=791221fd56c9a0e65b7eca684d5a205f, regionState=OPENING, regionLocation=bfeb2336aed7,33523,1731504891245 2024-11-13T13:34:52,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=791221fd56c9a0e65b7eca684d5a205f, ASSIGN because future has completed 2024-11-13T13:34:52,840 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 791221fd56c9a0e65b7eca684d5a205f, server=bfeb2336aed7,33523,1731504891245}] 2024-11-13T13:34:53,004 INFO [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f. 2024-11-13T13:34:53,004 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 791221fd56c9a0e65b7eca684d5a205f, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f.', STARTKEY => '', ENDKEY => ''} 2024-11-13T13:34:53,006 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 791221fd56c9a0e65b7eca684d5a205f 2024-11-13T13:34:53,006 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:34:53,006 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 791221fd56c9a0e65b7eca684d5a205f 2024-11-13T13:34:53,006 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 791221fd56c9a0e65b7eca684d5a205f 2024-11-13T13:34:53,009 INFO [StoreOpener-791221fd56c9a0e65b7eca684d5a205f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 791221fd56c9a0e65b7eca684d5a205f 2024-11-13T13:34:53,010 INFO [StoreOpener-791221fd56c9a0e65b7eca684d5a205f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 791221fd56c9a0e65b7eca684d5a205f columnFamilyName info 2024-11-13T13:34:53,010 DEBUG [StoreOpener-791221fd56c9a0e65b7eca684d5a205f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:34:53,011 INFO [StoreOpener-791221fd56c9a0e65b7eca684d5a205f-1 {}] regionserver.HStore(327): Store=791221fd56c9a0e65b7eca684d5a205f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T13:34:53,011 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 791221fd56c9a0e65b7eca684d5a205f 2024-11-13T13:34:53,012 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f 2024-11-13T13:34:53,012 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f 2024-11-13T13:34:53,013 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 791221fd56c9a0e65b7eca684d5a205f 2024-11-13T13:34:53,013 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 791221fd56c9a0e65b7eca684d5a205f 2024-11-13T13:34:53,015 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 791221fd56c9a0e65b7eca684d5a205f 2024-11-13T13:34:53,017 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T13:34:53,017 INFO [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 791221fd56c9a0e65b7eca684d5a205f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=879568, jitterRate=0.11842910945415497}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T13:34:53,017 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 791221fd56c9a0e65b7eca684d5a205f 2024-11-13T13:34:53,018 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 791221fd56c9a0e65b7eca684d5a205f: Running coprocessor pre-open hook at 1731504893007Writing region info on filesystem at 1731504893007Initializing all the Stores at 1731504893008 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731504893008Cleaning up temporary data from old regions at 1731504893013 (+5 ms)Running coprocessor post-open hooks at 1731504893017 (+4 ms)Region opened successfully at 1731504893018 (+1 ms) 2024-11-13T13:34:53,019 INFO [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f., pid=6, masterSystemTime=1731504892994 2024-11-13T13:34:53,021 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f. 2024-11-13T13:34:53,021 INFO [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f. 2024-11-13T13:34:53,021 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=791221fd56c9a0e65b7eca684d5a205f, regionState=OPEN, openSeqNum=2, regionLocation=bfeb2336aed7,33523,1731504891245 2024-11-13T13:34:53,024 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 791221fd56c9a0e65b7eca684d5a205f, server=bfeb2336aed7,33523,1731504891245 because future has completed 2024-11-13T13:34:53,025 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39113 {}] assignment.AssignmentManager(1535): Unable to acquire lock for regionNode state=OPEN, location=bfeb2336aed7,33523,1731504891245, table=TestLogRolling-testLogRollOnDatanodeDeath, region=791221fd56c9a0e65b7eca684d5a205f. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
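With region 791221fd56c9a0e65b7eca684d5a205f now OPEN and its row in hbase:meta updated, clients can resolve the region's location; the "Scanning META ... Found 1 regions for table" lines further below are that lookup from the test's side. A hedged sketch of the same lookup through the public RegionLocator API, reusing the `conn` handle from the first sketch:

```java
import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionLocator;

TableName table = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
try (RegionLocator locator = conn.getRegionLocator(table)) {
  List<HRegionLocation> locations = locator.getAllRegionLocations();
  for (HRegionLocation loc : locations) {
    // Each entry pairs a RegionInfo with the RegionServer currently hosting it,
    // e.g. 791221fd56c9a0e65b7eca684d5a205f -> bfeb2336aed7,33523,1731504891245.
    System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
  }
}
```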
2024-11-13T13:34:53,028 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-13T13:34:53,028 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 791221fd56c9a0e65b7eca684d5a205f, server=bfeb2336aed7,33523,1731504891245 in 185 msec 2024-11-13T13:34:53,032 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-13T13:34:53,032 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=791221fd56c9a0e65b7eca684d5a205f, ASSIGN in 347 msec 2024-11-13T13:34:53,033 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T13:34:53,034 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731504893033"}]},"ts":"1731504893033"} 2024-11-13T13:34:53,037 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-13T13:34:53,038 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T13:34:53,041 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 391 msec 2024-11-13T13:34:53,532 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T13:34:53,537 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:34:53,564 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:34:53,567 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:34:53,568 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:34:57,752 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-13T13:34:58,508 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T13:34:58,510 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:34:58,531 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:34:58,534 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:34:58,534 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:35:01,948 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T13:35:01,948 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-13T13:35:01,950 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-13T13:35:01,950 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-13T13:35:01,951 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T13:35:01,951 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-13T13:35:02,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39113 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T13:35:02,733 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-13T13:35:02,733 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-13T13:35:02,740 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-13T13:35:02,740 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f. 2024-11-13T13:35:02,760 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:35:02,765 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T13:35:02,768 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T13:35:02,768 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T13:35:02,768 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T13:35:02,769 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2137ebee{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/hadoop.log.dir/,AVAILABLE} 2024-11-13T13:35:02,769 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5c0b8b07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T13:35:02,862 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@15010086{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/java.io.tmpdir/jetty-localhost-44153-hadoop-hdfs-3_4_1-tests_jar-_-any-7271965281185658531/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:35:02,863 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7ab1ed71{HTTP/1.1, (http/1.1)}{localhost:44153} 2024-11-13T13:35:02,863 INFO [Time-limited test {}] server.Server(415): Started @117550ms 2024-11-13T13:35:02,864 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T13:35:02,898 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:35:02,901 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T13:35:02,902 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T13:35:02,902 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T13:35:02,902 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T13:35:02,903 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5d4dc0bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/hadoop.log.dir/,AVAILABLE} 2024-11-13T13:35:02,903 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@756ea16d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T13:35:03,027 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@e562151{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/java.io.tmpdir/jetty-localhost-43793-hadoop-hdfs-3_4_1-tests_jar-_-any-13562834448921630362/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:35:03,027 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7cde9b58{HTTP/1.1, (http/1.1)}{localhost:43793} 2024-11-13T13:35:03,028 INFO [Time-limited test {}] server.Server(415): Started @117715ms 2024-11-13T13:35:03,030 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T13:35:03,062 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:35:03,066 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T13:35:03,066 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T13:35:03,066 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T13:35:03,067 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T13:35:03,067 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56a8b58e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/hadoop.log.dir/,AVAILABLE} 2024-11-13T13:35:03,067 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4949cd53{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T13:35:03,166 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@f0ef2ce{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/java.io.tmpdir/jetty-localhost-41419-hadoop-hdfs-3_4_1-tests_jar-_-any-2050531812512342492/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:35:03,167 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@37673872{HTTP/1.1, (http/1.1)}{localhost:41419} 2024-11-13T13:35:03,167 INFO [Time-limited test {}] server.Server(415): Started @117854ms 2024-11-13T13:35:03,168 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T13:35:04,538 WARN [Thread-862 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data5/current/BP-1828668861-172.17.0.2-1731504888871/current, will proceed with Du for space computation calculation, 2024-11-13T13:35:04,539 WARN [Thread-863 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data6/current/BP-1828668861-172.17.0.2-1731504888871/current, will proceed with Du for space computation calculation, 2024-11-13T13:35:04,560 WARN [Thread-803 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T13:35:04,563 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x742412c66045b236 with lease ID 0x2515494a6e271d75: Processing first storage report for DS-015271de-8eac-4900-8d07-4f9567b2b004 from datanode DatanodeRegistration(127.0.0.1:33931, datanodeUuid=8f99e9e2-208b-40da-abea-6371320ca5d9, infoPort=45215, infoSecurePort=0, ipcPort=43989, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871) 2024-11-13T13:35:04,563 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x742412c66045b236 with lease ID 0x2515494a6e271d75: from storage DS-015271de-8eac-4900-8d07-4f9567b2b004 node DatanodeRegistration(127.0.0.1:33931, datanodeUuid=8f99e9e2-208b-40da-abea-6371320ca5d9, infoPort=45215, infoSecurePort=0, ipcPort=43989, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:35:04,563 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x742412c66045b236 with lease ID 0x2515494a6e271d75: Processing first storage report for DS-ae5ce0db-e553-4f20-9686-e8e7e99030b2 from datanode DatanodeRegistration(127.0.0.1:33931, datanodeUuid=8f99e9e2-208b-40da-abea-6371320ca5d9, infoPort=45215, infoSecurePort=0, ipcPort=43989, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871) 2024-11-13T13:35:04,563 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x742412c66045b236 with lease ID 0x2515494a6e271d75: from storage DS-ae5ce0db-e553-4f20-9686-e8e7e99030b2 node DatanodeRegistration(127.0.0.1:33931, datanodeUuid=8f99e9e2-208b-40da-abea-6371320ca5d9, infoPort=45215, infoSecurePort=0, ipcPort=43989, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:35:04,602 WARN [Thread-874 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data8/current/BP-1828668861-172.17.0.2-1731504888871/current, will proceed with Du for space computation calculation, 2024-11-13T13:35:04,602 WARN [Thread-873 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data7/current/BP-1828668861-172.17.0.2-1731504888871/current, will proceed with Du for space computation calculation, 2024-11-13T13:35:04,630 WARN [Thread-825 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T13:35:04,634 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2c9497693cbef908 with lease ID 0x2515494a6e271d76: Processing first storage report for DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7 from datanode DatanodeRegistration(127.0.0.1:34283, datanodeUuid=167b0498-1c57-4d91-941f-78ed73e83f00, infoPort=42571, infoSecurePort=0, ipcPort=34023, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871) 2024-11-13T13:35:04,634 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2c9497693cbef908 with lease ID 0x2515494a6e271d76: from storage DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7 node DatanodeRegistration(127.0.0.1:34283, datanodeUuid=167b0498-1c57-4d91-941f-78ed73e83f00, infoPort=42571, infoSecurePort=0, ipcPort=34023, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:35:04,635 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2c9497693cbef908 with lease ID 0x2515494a6e271d76: Processing first storage report for DS-cd357f00-d024-4b25-95f2-22ffeb078dcb from datanode DatanodeRegistration(127.0.0.1:34283, datanodeUuid=167b0498-1c57-4d91-941f-78ed73e83f00, infoPort=42571, infoSecurePort=0, ipcPort=34023, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871) 2024-11-13T13:35:04,635 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2c9497693cbef908 with lease ID 0x2515494a6e271d76: from storage DS-cd357f00-d024-4b25-95f2-22ffeb078dcb node DatanodeRegistration(127.0.0.1:34283, datanodeUuid=167b0498-1c57-4d91-941f-78ed73e83f00, infoPort=42571, infoSecurePort=0, ipcPort=34023, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:35:04,676 WARN [Thread-885 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data10/current/BP-1828668861-172.17.0.2-1731504888871/current, will proceed with Du for space computation calculation, 2024-11-13T13:35:04,676 WARN [Thread-884 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data9/current/BP-1828668861-172.17.0.2-1731504888871/current, will proceed with Du for space computation calculation, 2024-11-13T13:35:04,709 WARN [Thread-847 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T13:35:04,715 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x20cdaf94fd95f8c0 with lease ID 0x2515494a6e271d77: Processing first storage report for DS-1c4da21f-127f-48b1-b5b4-ef967b026978 from datanode DatanodeRegistration(127.0.0.1:33563, datanodeUuid=71713783-8b98-44ec-8317-a0ae89e00fb4, infoPort=44489, infoSecurePort=0, ipcPort=36097, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871) 2024-11-13T13:35:04,715 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x20cdaf94fd95f8c0 with lease ID 0x2515494a6e271d77: from storage DS-1c4da21f-127f-48b1-b5b4-ef967b026978 node DatanodeRegistration(127.0.0.1:33563, datanodeUuid=71713783-8b98-44ec-8317-a0ae89e00fb4, infoPort=44489, infoSecurePort=0, ipcPort=36097, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:35:04,715 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x20cdaf94fd95f8c0 with lease ID 0x2515494a6e271d77: Processing first storage report for DS-dbaf4abe-6002-412e-9de7-06e2031865fc from datanode DatanodeRegistration(127.0.0.1:33563, datanodeUuid=71713783-8b98-44ec-8317-a0ae89e00fb4, infoPort=44489, infoSecurePort=0, ipcPort=36097, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871) 2024-11-13T13:35:04,715 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x20cdaf94fd95f8c0 with lease ID 0x2515494a6e271d77: from storage DS-dbaf4abe-6002-412e-9de7-06e2031865fc node DatanodeRegistration(127.0.0.1:33563, datanodeUuid=71713783-8b98-44ec-8317-a0ae89e00fb4, infoPort=44489, infoSecurePort=0, ipcPort=36097, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-13T13:35:04,802 WARN [ResponseProcessor for block BP-1828668861-172.17.0.2-1731504888871:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1828668861-172.17.0.2-1731504888871:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:04,803 WARN [DataStreamer for file /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 block BP-1828668861-172.17.0.2-1731504888871:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK], DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK]) is bad. 
2024-11-13T13:35:04,804 WARN [ResponseProcessor for block BP-1828668861-172.17.0.2-1731504888871:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1828668861-172.17.0.2-1731504888871:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1828668861-172.17.0.2-1731504888871:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:04,805 WARN [DataStreamer for file /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504891893 block BP-1828668861-172.17.0.2-1731504888871:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK], DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK]) is bad. 2024-11-13T13:35:04,805 WARN [ResponseProcessor for block BP-1828668861-172.17.0.2-1731504888871:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1828668861-172.17.0.2-1731504888871:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1828668861-172.17.0.2-1731504888871:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:04,806 WARN [DataStreamer for file /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/WALs/bfeb2336aed7,39113,1731504891075/bfeb2336aed7%2C39113%2C1731504891075.1731504891382 block BP-1828668861-172.17.0.2-1731504888871:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK], DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK]) is bad. 2024-11-13T13:35:04,807 WARN [ResponseProcessor for block BP-1828668861-172.17.0.2-1731504888871:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1828668861-172.17.0.2-1731504888871:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1828668861-172.17.0.2-1731504888871:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:04,807 WARN [PacketResponder: BP-1828668861-172.17.0.2-1731504888871:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:43767] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] 
at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:04,808 WARN [PacketResponder: BP-1828668861-172.17.0.2-1731504888871:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:43767] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T13:35:04,809 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_789112729_22 at /127.0.0.1:43080 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:35467:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43080 dst: /127.0.0.1:35467 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:04,809 WARN [DataStreamer for file /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta block BP-1828668861-172.17.0.2-1731504888871:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK], DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK]) is bad. 2024-11-13T13:35:04,809 WARN [PacketResponder: BP-1828668861-172.17.0.2-1731504888871:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:43767] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] 
at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:04,809 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:43040 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35467:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43040 dst: /127.0.0.1:35467 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T13:35:04,810 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:43054 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35467:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43054 dst: /127.0.0.1:35467 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:04,810 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:33946 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43767:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33946 dst: /127.0.0.1:43767 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:04,810 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:33930 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43767:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33930 dst: /127.0.0.1:43767 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T13:35:04,809 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_789112729_22 at /127.0.0.1:33978 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:43767:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33978 dst: /127.0.0.1:43767 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:04,809 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1975906898_22 at /127.0.0.1:56690 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43767:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56690 dst: /127.0.0.1:43767 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:04,810 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1975906898_22 at /127.0.0.1:41528 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35467:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41528 dst: /127.0.0.1:35467 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:04,813 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@30e8c2b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:35:04,813 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2dc8ddff{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T13:35:04,813 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T13:35:04,814 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@401bd933{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T13:35:04,814 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@d35fd72{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/hadoop.log.dir/,STOPPED} 2024-11-13T13:35:04,815 WARN [BP-1828668861-172.17.0.2-1731504888871 heartbeating to localhost/127.0.0.1:40141 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T13:35:04,815 WARN [BP-1828668861-172.17.0.2-1731504888871 heartbeating to localhost/127.0.0.1:40141 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1828668861-172.17.0.2-1731504888871 (Datanode Uuid e06bd827-5889-475e-8fba-56aa016f4c7b) service to localhost/127.0.0.1:40141 2024-11-13T13:35:04,815 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T13:35:04,815 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T13:35:04,815 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data3/current/BP-1828668861-172.17.0.2-1731504888871 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:35:04,816 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data4/current/BP-1828668861-172.17.0.2-1731504888871 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:35:04,816 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T13:35:04,822 WARN [DataStreamer for file /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 block BP-1828668861-172.17.0.2-1731504888871:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:04,828 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@38cdedad {}] datanode.DataXceiver(331): 127.0.0.1:35467:DataXceiver error processing unknown operation src: /127.0.0.1:57920 dst: /127.0.0.1:35467 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T13:35:04,828 WARN [DataStreamer for file /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta block BP-1828668861-172.17.0.2-1731504888871:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:04,828 WARN [DataStreamer for file /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/WALs/bfeb2336aed7,39113,1731504891075/bfeb2336aed7%2C39113%2C1731504891075.1731504891382 block BP-1828668861-172.17.0.2-1731504888871:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:35:04,829 WARN [DataStreamer for file /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504891893 block BP-1828668861-172.17.0.2-1731504888871:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:04,831 WARN [BP-1828668861-172.17.0.2-1731504888871 heartbeating to localhost/127.0.0.1:40141 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1828668861-172.17.0.2-1731504888871 (Datanode Uuid 2aebb4ac-4601-45ab-99a7-bb640f705766) service to localhost/127.0.0.1:40141 2024-11-13T13:35:04,832 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data1/current/BP-1828668861-172.17.0.2-1731504888871 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:35:04,833 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data2/current/BP-1828668861-172.17.0.2-1731504888871 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:35:04,837 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7efd52f6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:35:04,838 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@21f2acf7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T13:35:04,838 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T13:35:04,838 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c80aceb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T13:35:04,838 INFO [Time-limited 
test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4549eece{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/hadoop.log.dir/,STOPPED} 2024-11-13T13:35:04,839 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T13:35:04,843 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f., hostname=bfeb2336aed7,33523,1731504891245, seqNum=2] 2024-11-13T13:35:04,844 ERROR [FSHLog-0-hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239-prefix:bfeb2336aed7,33523,1731504891245 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:04,845 WARN [FSHLog-0-hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239-prefix:bfeb2336aed7,33523,1731504891245 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:04,845 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:35:04,845 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog bfeb2336aed7%2C33523%2C1731504891245:(num 1731504891893) roll requested 2024-11-13T13:35:04,845 INFO [regionserver/bfeb2336aed7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C33523%2C1731504891245.1731504904845 2024-11-13T13:35:04,851 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:04,851 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:04,852 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:04,852 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:04,852 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:04,852 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504891893 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504904845 2024-11-13T13:35:04,852 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:04,852 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:35:04,853 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-13T13:35:04,854 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-13T13:35:04,854 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504891893 2024-11-13T13:35:04,856 WARN [IPC Server handler 3 on default port 40141 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504891893 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-13T13:35:04,858 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44489:44489),(127.0.0.1/127.0.0.1:42571:42571)] 2024-11-13T13:35:04,858 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504891893 is not closed yet, will try archiving it next time 2024-11-13T13:35:04,860 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504891893 after 4ms 2024-11-13T13:35:05,359 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:06,613 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:35:06,859 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:06,860 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504904845 2024-11-13T13:35:06,860 WARN [ResponseProcessor for block BP-1828668861-172.17.0.2-1731504888871:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1828668861-172.17.0.2-1731504888871:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:06,861 WARN [DataStreamer for file /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504904845 block BP-1828668861-172.17.0.2-1731504888871:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK], DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK]) is bad. 2024-11-13T13:35:06,861 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:34248 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:33563:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34248 dst: /127.0.0.1:33563 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:06,861 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:45090 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:34283:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45090 dst: /127.0.0.1:34283 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T13:35:06,980 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@f0ef2ce{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:35:06,981 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@37673872{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T13:35:06,981 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T13:35:06,981 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4949cd53{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T13:35:06,982 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56a8b58e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/hadoop.log.dir/,STOPPED} 2024-11-13T13:35:06,983 WARN [BP-1828668861-172.17.0.2-1731504888871 heartbeating to localhost/127.0.0.1:40141 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T13:35:06,983 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-13T13:35:06,983 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T13:35:06,983 WARN [BP-1828668861-172.17.0.2-1731504888871 heartbeating to localhost/127.0.0.1:40141 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1828668861-172.17.0.2-1731504888871 (Datanode Uuid 71713783-8b98-44ec-8317-a0ae89e00fb4) service to localhost/127.0.0.1:40141 2024-11-13T13:35:06,984 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data9/current/BP-1828668861-172.17.0.2-1731504888871 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:35:06,984 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data10/current/BP-1828668861-172.17.0.2-1731504888871 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:35:06,984 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T13:35:07,359 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:08,613 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:08,859 WARN [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]] 2024-11-13T13:35:08,859 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:08,860 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog bfeb2336aed7%2C33523%2C1731504891245:(num 1731504904845) roll requested 2024-11-13T13:35:08,860 INFO [regionserver/bfeb2336aed7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C33523%2C1731504891245.1731504908860 2024-11-13T13:35:08,861 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504891893 after 4007ms 2024-11-13T13:35:08,863 WARN [Thread-905 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:08,863 WARN [Thread-905 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK], DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK]) is bad. 2024-11-13T13:35:08,863 WARN [Thread-905 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741839_1021 2024-11-13T13:35:08,866 WARN [Thread-905 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK] 2024-11-13T13:35:08,871 WARN [Thread-905 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43767 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:08,871 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:45098 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741840_1022] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data8]'}, localName='127.0.0.1:34283', datanodeUuid='167b0498-1c57-4d91-941f-78ed73e83f00', xmitsInProgress=0}:Exception transferring block BP-1828668861-172.17.0.2-1731504888871:blk_1073741840_1022 to mirror 127.0.0.1:43767 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:08,871 WARN [Thread-905 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK], DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK]) is bad. 2024-11-13T13:35:08,871 WARN [Thread-905 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741840_1022 2024-11-13T13:35:08,871 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:45098 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741840_1022] {}] datanode.BlockReceiver(316): Block 1073741840 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-13T13:35:08,871 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:45098 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:34283:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45098 dst: /127.0.0.1:34283 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:08,872 WARN [Thread-905 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK] 2024-11-13T13:35:08,873 WARN [Thread-905 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:08,873 WARN [Thread-905 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK], DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]) is bad. 2024-11-13T13:35:08,874 WARN [Thread-905 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741841_1023 2024-11-13T13:35:08,874 WARN [Thread-905 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK] 2024-11-13T13:35:08,879 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:08,879 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:08,879 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:08,879 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:08,879 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:08,879 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504904845 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504908860 2024-11-13T13:35:08,880 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45215:45215),(127.0.0.1/127.0.0.1:42571:42571)] 2024-11-13T13:35:08,880 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504891893 is not closed yet, will try archiving it next time 2024-11-13T13:35:08,880 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504904845 is not closed yet, will try archiving it next time 2024-11-13T13:35:08,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34283 is added to blk_1073741838_1020 (size=2431) 2024-11-13T13:35:08,883 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504891893 is not closed yet, will try archiving it next time 2024-11-13T13:35:08,989 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T13:35:09,359 INFO 
[master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:10,613 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:10,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741838_1020 (size=2431) 2024-11-13T13:35:10,880 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:10,993 WARN [ResponseProcessor for block BP-1828668861-172.17.0.2-1731504888871:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1828668861-172.17.0.2-1731504888871:blk_1073741842_1024 java.io.IOException: Bad response ERROR for BP-1828668861-172.17.0.2-1731504888871:blk_1073741842_1024 from datanode DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:35:10,993 WARN [DataStreamer for file /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504908860 block BP-1828668861-172.17.0.2-1731504888871:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK], DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]) is bad. 2024-11-13T13:35:10,994 WARN [PacketResponder: BP-1828668861-172.17.0.2-1731504888871:blk_1073741842_1024, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:34283] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:10,995 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:45110 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:34283:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45110 dst: /127.0.0.1:34283 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:10,996 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@e562151{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:35:10,996 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:41548 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:33931:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41548 dst: /127.0.0.1:33931 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T13:35:10,997 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7cde9b58{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T13:35:10,997 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T13:35:10,997 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@756ea16d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T13:35:10,997 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5d4dc0bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/hadoop.log.dir/,STOPPED} 2024-11-13T13:35:11,000 WARN [BP-1828668861-172.17.0.2-1731504888871 heartbeating to localhost/127.0.0.1:40141 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T13:35:11,000 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-13T13:35:11,000 WARN [BP-1828668861-172.17.0.2-1731504888871 heartbeating to localhost/127.0.0.1:40141 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1828668861-172.17.0.2-1731504888871 (Datanode Uuid 167b0498-1c57-4d91-941f-78ed73e83f00) service to localhost/127.0.0.1:40141 2024-11-13T13:35:11,000 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T13:35:11,000 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data7/current/BP-1828668861-172.17.0.2-1731504888871 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:35:11,001 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data8/current/BP-1828668861-172.17.0.2-1731504888871 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:35:11,001 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T13:35:11,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33523 {}] regionserver.HRegion(8855): Flush requested on 791221fd56c9a0e65b7eca684d5a205f 2024-11-13T13:35:11,013 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 791221fd56c9a0e65b7eca684d5a205f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T13:35:11,037 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/.tmp/info/a89abbf15cac4cad9521dc2989663b30 is 1080, key is row0002/info:/1731504906985/Put/seqid=0 2024-11-13T13:35:11,039 WARN [Thread-918 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection 
refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:11,040 WARN [Thread-918 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK], DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]) is bad. 2024-11-13T13:35:11,040 WARN [Thread-918 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741843_1026 2024-11-13T13:35:11,041 WARN [Thread-918 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK] 2024-11-13T13:35:11,045 WARN [Thread-918 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35467 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:11,045 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:41584 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741844_1027] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data6]'}, localName='127.0.0.1:33931', datanodeUuid='8f99e9e2-208b-40da-abea-6371320ca5d9', xmitsInProgress=0}:Exception transferring block BP-1828668861-172.17.0.2-1731504888871:blk_1073741844_1027 to mirror 127.0.0.1:35467 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:11,046 WARN [Thread-918 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK], DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]) is bad. 2024-11-13T13:35:11,046 WARN [Thread-918 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741844_1027 2024-11-13T13:35:11,046 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:41584 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741844_1027] {}] datanode.BlockReceiver(316): Block 1073741844 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T13:35:11,046 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:41584 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741844_1027] {}] datanode.DataXceiver(331): 127.0.0.1:33931:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41584 dst: /127.0.0.1:33931 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:11,046 WARN [Thread-918 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK] 2024-11-13T13:35:11,048 WARN [Thread-918 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:11,048 WARN [Thread-918 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK], DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK]) is bad. 2024-11-13T13:35:11,048 WARN [Thread-918 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741845_1028 2024-11-13T13:35:11,050 WARN [Thread-918 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK] 2024-11-13T13:35:11,057 WARN [Thread-918 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33563 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:11,057 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:41598 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data6]'}, localName='127.0.0.1:33931', datanodeUuid='8f99e9e2-208b-40da-abea-6371320ca5d9', xmitsInProgress=0}:Exception transferring block BP-1828668861-172.17.0.2-1731504888871:blk_1073741846_1029 to mirror 127.0.0.1:33563 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:11,058 WARN [Thread-918 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK], DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK]) is bad. 2024-11-13T13:35:11,058 WARN [Thread-918 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741846_1029 2024-11-13T13:35:11,058 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:41598 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T13:35:11,058 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:41598 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:33931:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41598 dst: /127.0.0.1:33931 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T13:35:11,058 WARN [Thread-918 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK] 2024-11-13T13:35:11,059 WARN [IPC Server handler 0 on default port 40141 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T13:35:11,060 WARN [IPC Server handler 0 on default port 40141 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T13:35:11,060 WARN [IPC Server handler 0 on default port 40141 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T13:35:11,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741847_1030 (size=10347) 2024-11-13T13:35:11,064 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/.tmp/info/a89abbf15cac4cad9521dc2989663b30 2024-11-13T13:35:11,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/.tmp/info/a89abbf15cac4cad9521dc2989663b30 as hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/a89abbf15cac4cad9521dc2989663b30 2024-11-13T13:35:11,086 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/a89abbf15cac4cad9521dc2989663b30, entries=5, sequenceid=11, filesize=10.1 K 2024-11-13T13:35:11,087 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=7.35 KB/7531 for 791221fd56c9a0e65b7eca684d5a205f in 74ms, sequenceid=11, compaction requested=false 2024-11-13T13:35:11,087 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 791221fd56c9a0e65b7eca684d5a205f: 2024-11-13T13:35:11,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33523 {}] 
regionserver.HRegion(8855): Flush requested on 791221fd56c9a0e65b7eca684d5a205f 2024-11-13T13:35:11,235 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 791221fd56c9a0e65b7eca684d5a205f 1/1 column families, dataSize=8.40 KB heapSize=9.25 KB 2024-11-13T13:35:11,240 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/.tmp/info/0b910b2348c64a5691f6ec9319489b93 is 1080, key is row0007/info:/1731504911015/Put/seqid=0 2024-11-13T13:35:11,242 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:11,242 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK], DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK]) is bad. 2024-11-13T13:35:11,242 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741848_1031 2024-11-13T13:35:11,243 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK] 2024-11-13T13:35:11,244 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
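Note: Thread-925 repeats the same cycle for blk_1073741848_1031 and blk_1073741849_1032: createBlockOutputStream fails with "Connection refused", the offending datanode is excluded, a fresh block is allocated, and the next candidate fails in turn. How aggressively an HDFS client replaces failed datanodes during this recovery is controlled by the standard dfs.client.block.write.replace-datanode-on-failure.* settings. Below is a minimal sketch of a client write against the test NameNode port seen in the log (40141); the configuration values and the path are illustrative assumptions, not what this test actually sets.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PipelineRecoveryWrite {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Standard client knobs for datanode replacement during pipeline recovery.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        conf.setInt("dfs.replication", 2); // matches the "reach 2" target in the log

        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:40141"), conf);
             FSDataOutputStream out = fs.create(new Path("/tmp/pipeline-demo"))) {
            out.writeBytes("payload that forces a block allocation\n");
            out.hflush(); // pushes data down the pipeline; failures surface as in the log
        }
    }
}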
2024-11-13T13:35:11,245 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK], DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]) is bad. 2024-11-13T13:35:11,245 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741849_1032 2024-11-13T13:35:11,245 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK] 2024-11-13T13:35:11,247 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:11,247 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK], DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]) is bad. 2024-11-13T13:35:11,247 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741850_1033 2024-11-13T13:35:11,247 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK] 2024-11-13T13:35:11,248 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:11,249 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK], DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK]) is bad. 2024-11-13T13:35:11,249 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741851_1034 2024-11-13T13:35:11,249 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK] 2024-11-13T13:35:11,250 WARN [IPC Server handler 1 on default port 40141 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T13:35:11,250 WARN [IPC Server handler 1 on default port 40141 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T13:35:11,250 WARN [IPC Server handler 1 on default port 40141 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T13:35:11,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741852_1035 (size=12506) 2024-11-13T13:35:11,360 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
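Note: the "All datanodes [...] are bad. Aborting..." entry comes from the master's WAL roller (the region server's follows shortly): once every datanode in an existing append pipeline has been excluded and no replacement can be found, DataStreamer gives up on that output stream, which is what forces the WAL roll seen later. A small diagnostic sketch, assuming a DistributedFileSystem handle to the same NameNode, that lists which datanodes the NameNode still reports as live write targets; everything except the port is illustrative.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

public class LiveDataNodeReport {
    public static void main(String[] args) throws Exception {
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:40141"),
                                            new Configuration())) {
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            // Roughly equivalent to `hdfs dfsadmin -report -live`.
            for (DatanodeInfo dn : dfs.getDataNodeStats(DatanodeReportType.LIVE)) {
                System.out.println(dn.getXferAddr() + " remaining=" + dn.getRemaining());
            }
        }
    }
}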
2024-11-13T13:35:11,654 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.40 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/.tmp/info/0b910b2348c64a5691f6ec9319489b93 2024-11-13T13:35:11,662 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/.tmp/info/0b910b2348c64a5691f6ec9319489b93 as hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/0b910b2348c64a5691f6ec9319489b93 2024-11-13T13:35:11,670 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/0b910b2348c64a5691f6ec9319489b93, entries=7, sequenceid=22, filesize=12.2 K 2024-11-13T13:35:11,671 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.40 KB/8606, heapSize ~9.23 KB/9456, currentSize=2.10 KB/2150 for 791221fd56c9a0e65b7eca684d5a205f in 436ms, sequenceid=22, compaction requested=false 2024-11-13T13:35:11,671 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 791221fd56c9a0e65b7eca684d5a205f: 2024-11-13T13:35:11,671 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-13T13:35:11,671 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T13:35:11,671 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/0b910b2348c64a5691f6ec9319489b93 because midkey is the same as first or last row 2024-11-13T13:35:12,615 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
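Note: after the flush above, the split policy reports the store (sumSize=22.3 K) as exceeding the 16.0 K check size, but StoreUtils declines to split because the candidate file's midkey equals its first or last row, so the region stays whole. A 16 K threshold is far below the default 10 GB hbase.hregion.max.filesize, which suggests the test shrinks the limit so that flush, roll and split logic fire after only a few rows; the sketch below shows one way such a limit can be set on a table. The table name is taken from the log, everything else is an illustrative assumption about how the test might be wired, not a transcription of it.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SmallMaxFileSizeTable {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            TableName tn = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
            admin.createTable(TableDescriptorBuilder.newBuilder(tn)
                    // 16 KB max store size: tiny on purpose, so the split check
                    // in the log fires after just a few flushes.
                    .setMaxFileSize(16 * 1024L)
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                    .build());
        }
    }
}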
2024-11-13T13:35:12,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33523 {}] regionserver.HRegion(8855): Flush requested on 791221fd56c9a0e65b7eca684d5a205f 2024-11-13T13:35:12,657 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 791221fd56c9a0e65b7eca684d5a205f 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-13T13:35:12,663 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/.tmp/info/752738c2a8c6483eb24ba5fde94235bc is 1079, key is tmprow/info:/1731504912655/Put/seqid=0 2024-11-13T13:35:12,665 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:12,666 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK], DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]) is bad. 2024-11-13T13:35:12,666 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741853_1036 2024-11-13T13:35:12,666 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK] 2024-11-13T13:35:12,668 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:35:12,668 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK], DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK]) is bad. 2024-11-13T13:35:12,668 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741854_1037 2024-11-13T13:35:12,669 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK] 2024-11-13T13:35:12,671 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33563 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:12,670 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52620 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741855_1038] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data6]'}, localName='127.0.0.1:33931', datanodeUuid='8f99e9e2-208b-40da-abea-6371320ca5d9', xmitsInProgress=0}:Exception transferring block BP-1828668861-172.17.0.2-1731504888871:blk_1073741855_1038 to mirror 127.0.0.1:33563 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T13:35:12,671 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK], DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK]) is bad. 2024-11-13T13:35:12,671 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741855_1038 2024-11-13T13:35:12,671 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52620 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741855_1038] {}] datanode.BlockReceiver(316): Block 1073741855 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T13:35:12,671 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52620 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741855_1038] {}] datanode.DataXceiver(331): 127.0.0.1:33931:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52620 dst: /127.0.0.1:33931 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:12,671 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK] 2024-11-13T13:35:12,673 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34283 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:35:12,673 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52630 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741856_1039] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data6]'}, localName='127.0.0.1:33931', datanodeUuid='8f99e9e2-208b-40da-abea-6371320ca5d9', xmitsInProgress=0}:Exception transferring block BP-1828668861-172.17.0.2-1731504888871:blk_1073741856_1039 to mirror 127.0.0.1:34283 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:12,673 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK], DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]) is bad. 2024-11-13T13:35:12,673 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741856_1039 2024-11-13T13:35:12,673 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52630 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741856_1039] {}] datanode.BlockReceiver(316): Block 1073741856 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T13:35:12,673 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52630 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741856_1039] {}] datanode.DataXceiver(331): 127.0.0.1:33931:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52630 dst: /127.0.0.1:33931 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:12,674 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK] 2024-11-13T13:35:12,674 WARN [IPC Server handler 2 on default port 40141 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T13:35:12,674 WARN [IPC Server handler 2 on default port 40141 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T13:35:12,675 WARN [IPC Server handler 2 on default port 40141 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T13:35:12,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741857_1040 (size=6027) 2024-11-13T13:35:12,881 WARN [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK]] 2024-11-13T13:35:12,881 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
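Note: the recurring "Failed to place enough replicas" warnings all say the same thing: with only one live DISK storage left, BlockPlacementPolicyDefault cannot find a second target for replication factor 2, and the message itself asks for DEBUG on BlockPlacementPolicy and NetworkTopology to see the detailed reasoning. Since this harness logs through Log4j 2, those two loggers can be raised either in the logging properties file or programmatically; a minimal sketch of the programmatic form, affecting only the loggers named in the warning.

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

public class EnablePlacementDebug {
    public static void main(String[] args) {
        // Raise exactly the two loggers the warning points at; all other
        // levels stay as configured.
        Configurator.setLevel(
                "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy",
                Level.DEBUG);
        Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
    }
}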
2024-11-13T13:35:12,881 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog bfeb2336aed7%2C33523%2C1731504891245:(num 1731504908860) roll requested 2024-11-13T13:35:12,882 INFO [regionserver/bfeb2336aed7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C33523%2C1731504891245.1731504912881 2024-11-13T13:35:12,887 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:12,887 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK], DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK]) is bad. 2024-11-13T13:35:12,887 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741858_1041 2024-11-13T13:35:12,888 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK] 2024-11-13T13:35:12,892 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35467 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:35:12,892 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52656 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data6]'}, localName='127.0.0.1:33931', datanodeUuid='8f99e9e2-208b-40da-abea-6371320ca5d9', xmitsInProgress=0}:Exception transferring block BP-1828668861-172.17.0.2-1731504888871:blk_1073741859_1042 to mirror 127.0.0.1:35467 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:12,892 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK], DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]) is bad. 2024-11-13T13:35:12,892 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741859_1042 2024-11-13T13:35:12,892 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52656 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-13T13:35:12,892 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52656 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:33931:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52656 dst: /127.0.0.1:33931 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:12,893 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK] 2024-11-13T13:35:12,896 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34283 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:12,896 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52664 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741860_1043] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data6]'}, localName='127.0.0.1:33931', datanodeUuid='8f99e9e2-208b-40da-abea-6371320ca5d9', xmitsInProgress=0}:Exception transferring block BP-1828668861-172.17.0.2-1731504888871:blk_1073741860_1043 to mirror 127.0.0.1:34283 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
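Note: the "status=ERROR ... ack with firstBadLink" failures differ from the plain "Connection refused" ones earlier: here the client did reach the first datanode (127.0.0.1:33931), which then acked back which downstream mirror it could not contact, so the client drops that mirror rather than the head of the pipeline. Once a write finally succeeds on whatever nodes remain, the resulting replica placement can be inspected from the client side; a hedged sketch using the generic FileSystem API, with an illustrative path (any of the WAL or HFile paths in the log would work the same way).

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReplicaPlacementCheck {
    public static void main(String[] args) throws Exception {
        Path p = new Path("/user/jenkins/test-data/some-file"); // illustrative path
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:40141"),
                                            new Configuration())) {
            FileStatus st = fs.getFileStatus(p);
            for (BlockLocation bl : fs.getFileBlockLocations(st, 0, st.getLen())) {
                // Each entry lists the datanodes actually holding that block,
                // e.g. only 127.0.0.1:33931 once the others have been excluded.
                System.out.println(bl.getOffset() + " -> " + String.join(",", bl.getNames()));
            }
        }
    }
}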
2024-11-13T13:35:12,896 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK], DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]) is bad. 2024-11-13T13:35:12,897 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741860_1043 2024-11-13T13:35:12,897 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52664 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741860_1043] {}] datanode.BlockReceiver(316): Block 1073741860 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-13T13:35:12,897 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52664 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741860_1043] {}] datanode.DataXceiver(331): 127.0.0.1:33931:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52664 dst: /127.0.0.1:33931 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:12,897 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK] 2024-11-13T13:35:12,900 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33563 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:35:12,900 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52680 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741861_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data6]'}, localName='127.0.0.1:33931', datanodeUuid='8f99e9e2-208b-40da-abea-6371320ca5d9', xmitsInProgress=0}:Exception transferring block BP-1828668861-172.17.0.2-1731504888871:blk_1073741861_1044 to mirror 127.0.0.1:33563 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:12,900 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK], DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK]) is bad. 2024-11-13T13:35:12,900 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741861_1044 2024-11-13T13:35:12,900 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52680 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741861_1044] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-13T13:35:12,901 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52680 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741861_1044] {}] datanode.DataXceiver(331): 127.0.0.1:33931:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52680 dst: /127.0.0.1:33931 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:12,901 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK] 2024-11-13T13:35:12,902 WARN [IPC Server handler 0 on default port 40141 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T13:35:12,902 WARN [IPC Server handler 0 on default port 40141 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T13:35:12,902 WARN [IPC Server handler 0 on default port 40141 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T13:35:12,905 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:12,905 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:12,905 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:12,905 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:12,906 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:12,906 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504908860 with entries=26, filesize=25.69 KB; new WAL /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504912881 2024-11-13T13:35:12,907 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45215:45215)] 2024-11-13T13:35:12,907 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504891893 is not closed yet, will try archiving it next time 
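Note: despite the failed block allocations, the roller eventually brings up the new WAL through the one reachable pipeline ("Rolled WAL ... with entries=26, filesize=25.69 KB; new WAL ...1731504912881") and the old file is queued for archiving to oldWALs. Rolls like this one are triggered by the degraded pipeline, but a roll can also be requested explicitly for a given region server; a hedged sketch using the HBase Admin API, with the server name assembled from the host, port and start code that appear in the log's WAL paths.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ManualWalRoll {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // host, port, start code as they appear in the WAL directory name.
            ServerName rs = ServerName.valueOf("bfeb2336aed7", 33523, 1731504891245L);
            admin.rollWALWriter(rs); // ask that region server to close and roll its WAL
        }
    }
}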
2024-11-13T13:35:12,907 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504908860 is not closed yet, will try archiving it next time 2024-11-13T13:35:12,908 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504904845 to hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/oldWALs/bfeb2336aed7%2C33523%2C1731504891245.1731504904845 2024-11-13T13:35:12,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741842_1025 (size=26315) 2024-11-13T13:35:13,080 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=32 (bloomFilter=true), to=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/.tmp/info/752738c2a8c6483eb24ba5fde94235bc 2024-11-13T13:35:13,092 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/.tmp/info/752738c2a8c6483eb24ba5fde94235bc as hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/752738c2a8c6483eb24ba5fde94235bc 2024-11-13T13:35:13,099 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/752738c2a8c6483eb24ba5fde94235bc, entries=1, sequenceid=32, filesize=5.9 K 2024-11-13T13:35:13,100 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 791221fd56c9a0e65b7eca684d5a205f in 444ms, sequenceid=32, compaction requested=true 2024-11-13T13:35:13,100 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 791221fd56c9a0e65b7eca684d5a205f: 2024-11-13T13:35:13,101 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-13T13:35:13,101 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T13:35:13,101 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/0b910b2348c64a5691f6ec9319489b93 because midkey is the same as first or last row 2024-11-13T13:35:13,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 791221fd56c9a0e65b7eca684d5a205f:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T13:35:13,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: 
system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T13:35:13,101 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T13:35:13,102 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T13:35:13,103 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.HStore(1541): 791221fd56c9a0e65b7eca684d5a205f/info is initiating minor compaction (all files) 2024-11-13T13:35:13,103 INFO [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 791221fd56c9a0e65b7eca684d5a205f/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f. 2024-11-13T13:35:13,103 INFO [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/a89abbf15cac4cad9521dc2989663b30, hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/0b910b2348c64a5691f6ec9319489b93, hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/752738c2a8c6483eb24ba5fde94235bc] into tmpdir=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/.tmp, totalSize=28.2 K 2024-11-13T13:35:13,103 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] compactions.Compactor(225): Compacting a89abbf15cac4cad9521dc2989663b30, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731504906985 2024-11-13T13:35:13,104 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0b910b2348c64a5691f6ec9319489b93, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=22, earliestPutTs=1731504911015 2024-11-13T13:35:13,104 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] compactions.Compactor(225): Compacting 752738c2a8c6483eb24ba5fde94235bc, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=32, earliestPutTs=1731504912655 2024-11-13T13:35:13,119 INFO [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 791221fd56c9a0e65b7eca684d5a205f#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T13:35:13,119 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/.tmp/info/a1d06f447e274837ab65702cd1d4822f is 1080, key is row0002/info:/1731504906985/Put/seqid=0 2024-11-13T13:35:13,122 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43767 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:13,122 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52724 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741863_1046] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data6]'}, localName='127.0.0.1:33931', datanodeUuid='8f99e9e2-208b-40da-abea-6371320ca5d9', xmitsInProgress=0}:Exception transferring block BP-1828668861-172.17.0.2-1731504888871:blk_1073741863_1046 to mirror 127.0.0.1:43767 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
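The compaction selection at 13:35:13,101-13,104 above picks all three store files (10.1 K, 12.2 K and 5.9 K, reported as 28880 bytes in total) for a minor compaction. The sketch below is a deliberately simplified, size-budgeted selection loop, not HBase's ExploringCompactionPolicy; the class, the record and the budget value are invented, and the per-file byte counts are approximations chosen so they add up to the 28880 total reported above.

    import java.util.ArrayList;
    import java.util.List;

    public class ToyCompactionSelection {
        // Toy stand-in for a store file: only the size matters here.
        record ToyStoreFile(String name, long sizeBytes) {}

        // Select candidate files in order until either all eligible files are taken
        // or a (hypothetical) per-compaction size budget would be exceeded.
        static List<ToyStoreFile> select(List<ToyStoreFile> eligible, long maxTotalBytes) {
            List<ToyStoreFile> picked = new ArrayList<>();
            long total = 0;
            for (ToyStoreFile f : eligible) {
                if (total + f.sizeBytes() > maxTotalBytes) {
                    break;
                }
                picked.add(f);
                total += f.sizeBytes();
            }
            return picked;
        }

        public static void main(String[] args) {
            // Approximate byte sizes for the 10.1 K / 12.2 K / 5.9 K files named above.
            List<ToyStoreFile> files = List.of(
                new ToyStoreFile("a89abbf15cac4cad9521dc2989663b30", 10_342),
                new ToyStoreFile("0b910b2348c64a5691f6ec9319489b93", 12_511),
                new ToyStoreFile("752738c2a8c6483eb24ba5fde94235bc", 6_027));
            List<ToyStoreFile> picked = select(files, 64L * 1024);
            long total = picked.stream().mapToLong(ToyStoreFile::sizeBytes).sum();
            System.out.println("Selected " + picked.size() + " files, total " + total + " bytes");
        }
    }

With the budget well above the combined size, all three files are selected, mirroring the "Selecting compaction from 3 store files ... 3 eligible" line above.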
2024-11-13T13:35:13,122 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK], DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK]) is bad. 2024-11-13T13:35:13,122 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741863_1046 2024-11-13T13:35:13,123 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52724 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741863_1046] {}] datanode.BlockReceiver(316): Block 1073741863 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T13:35:13,123 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52724 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741863_1046] {}] datanode.DataXceiver(331): 127.0.0.1:33931:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52724 dst: /127.0.0.1:33931 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:13,123 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK] 2024-11-13T13:35:13,125 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34283 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
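The 13:35:13,122 entries above show the client-side recovery cycle for a failed block write: the node named as firstBadLink is marked bad, the block is abandoned, the node goes onto an exclusion list, and a fresh block with a fresh pipeline is requested. A minimal sketch of that exclude-and-retry control flow is below; it is not the DataStreamer implementation, the connect check is faked with a set of "down" nodes, and every identifier in it is invented.

    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class ToyPipelineRecovery {
        // Pretend connect check: nodes in the "down" set refuse connections.
        static boolean canConnect(String node, Set<String> downNodes) {
            return !downNodes.contains(node);
        }

        // Try to build a pipeline of `replication` nodes, excluding any node that
        // failed on a previous attempt; give up when too few candidates remain.
        static List<String> buildPipeline(List<String> allNodes, Set<String> downNodes, int replication) {
            Set<String> excluded = new HashSet<>();
            while (true) {
                List<String> candidates = allNodes.stream()
                    .filter(n -> !excluded.contains(n)).toList();
                if (candidates.size() < replication) {
                    throw new IllegalStateException("All datanodes are bad. Aborting...");
                }
                List<String> pipeline = candidates.subList(0, replication);
                String firstBadLink = pipeline.stream()
                    .filter(n -> !canConnect(n, downNodes)).findFirst().orElse(null);
                if (firstBadLink == null) {
                    return pipeline;        // every node in the pipeline accepted the stream
                }
                excluded.add(firstBadLink); // "Excluding datanode ..." then retry with a new block
            }
        }

        public static void main(String[] args) {
            List<String> nodes = List.of("127.0.0.1:33931", "127.0.0.1:43767",
                                         "127.0.0.1:34283", "127.0.0.1:33563", "127.0.0.1:35467");
            Set<String> down = Set.of("127.0.0.1:43767", "127.0.0.1:34283",
                                      "127.0.0.1:33563", "127.0.0.1:35467");
            try {
                System.out.println("Pipeline: " + buildPipeline(nodes, down, 2));
            } catch (IllegalStateException e) {
                System.out.println(e.getMessage()); // only one live node left, so replication=2 fails
            }
        }
    }

With four of the five datanodes unreachable, each retry excludes one more node until too few remain, which is why later entries in this log end with "All datanodes ... are bad. Aborting...".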
2024-11-13T13:35:13,125 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52736 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741864_1047] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data6]'}, localName='127.0.0.1:33931', datanodeUuid='8f99e9e2-208b-40da-abea-6371320ca5d9', xmitsInProgress=0}:Exception transferring block BP-1828668861-172.17.0.2-1731504888871:blk_1073741864_1047 to mirror 127.0.0.1:34283 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:13,125 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK], DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]) is bad. 2024-11-13T13:35:13,125 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741864_1047 2024-11-13T13:35:13,125 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52736 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741864_1047] {}] datanode.BlockReceiver(316): Block 1073741864 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T13:35:13,126 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52736 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741864_1047] {}] datanode.DataXceiver(331): 127.0.0.1:33931:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52736 dst: /127.0.0.1:33931 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:13,126 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK] 2024-11-13T13:35:13,127 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:13,127 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK], DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK]) is bad. 2024-11-13T13:35:13,127 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741865_1048 2024-11-13T13:35:13,128 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK] 2024-11-13T13:35:13,129 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:13,129 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK], DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]) is bad. 2024-11-13T13:35:13,129 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741866_1049 2024-11-13T13:35:13,129 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK] 2024-11-13T13:35:13,130 WARN [IPC Server handler 1 on default port 40141 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T13:35:13,130 WARN [IPC Server handler 1 on default port 40141 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T13:35:13,130 WARN [IPC Server handler 1 on default port 40141 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T13:35:13,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741867_1050 (size=17994) 2024-11-13T13:35:13,142 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/.tmp/info/a1d06f447e274837ab65702cd1d4822f as hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/a1d06f447e274837ab65702cd1d4822f 2024-11-13T13:35:13,151 INFO [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 791221fd56c9a0e65b7eca684d5a205f/info of 791221fd56c9a0e65b7eca684d5a205f into a1d06f447e274837ab65702cd1d4822f(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
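Both the flush commit at 13:35:13,092 and the compaction commit at 13:35:13,142 above write the new HFile into a region-local .tmp directory first and only move it into the info store directory once it is complete, so readers never observe a partially written file. The sketch below shows that write-then-rename pattern on a local filesystem; the helper name and paths are illustrative only, and the real code goes through HRegionFileSystem against HDFS rather than java.nio.file.

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    public class ToyTmpCommit {
        // Write the payload into <region>/.tmp/<name>, then move it into
        // <region>/info/<name>. The move is the "commit": until it happens,
        // nothing under info/ references the new file.
        static Path commitNewStoreFile(Path regionDir, String name, byte[] payload) throws IOException {
            Path tmpDir = Files.createDirectories(regionDir.resolve(".tmp"));
            Path storeDir = Files.createDirectories(regionDir.resolve("info"));
            Path tmpFile = tmpDir.resolve(name);
            Files.write(tmpFile, payload);
            Path committed = storeDir.resolve(name);
            try {
                return Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
            } catch (IOException e) {
                // ATOMIC_MOVE may be unsupported on some filesystems; fall back to a plain move.
                return Files.move(tmpFile, committed, StandardCopyOption.REPLACE_EXISTING);
            }
        }

        public static void main(String[] args) throws IOException {
            Path region = Files.createTempDirectory("791221fd56c9a0e65b7eca684d5a205f");
            Path file = commitNewStoreFile(region, "752738c2a8c6483eb24ba5fde94235bc", new byte[]{1, 2, 3});
            System.out.println("Committed " + file);
        }
    }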
2024-11-13T13:35:13,151 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 791221fd56c9a0e65b7eca684d5a205f: 2024-11-13T13:35:13,151 INFO [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f., storeName=791221fd56c9a0e65b7eca684d5a205f/info, priority=13, startTime=1731504913101; duration=0sec 2024-11-13T13:35:13,151 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-13T13:35:13,151 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T13:35:13,151 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/a1d06f447e274837ab65702cd1d4822f because midkey is the same as first or last row 2024-11-13T13:35:13,151 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-13T13:35:13,151 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T13:35:13,151 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/a1d06f447e274837ab65702cd1d4822f because midkey is the same as first or last row 2024-11-13T13:35:13,152 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-13T13:35:13,152 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T13:35:13,152 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/a1d06f447e274837ab65702cd1d4822f because midkey is the same as first or last row 2024-11-13T13:35:13,152 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T13:35:13,152 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 791221fd56c9a0e65b7eca684d5a205f:info 2024-11-13T13:35:13,309 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504891893 is not closed yet, will try archiving it next time 2024-11-13T13:35:13,360 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:13,566 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4328d5c8[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33931, datanodeUuid=8f99e9e2-208b-40da-abea-6371320ca5d9, infoPort=45215, infoSecurePort=0, ipcPort=43989, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871):Failed to transfer BP-1828668861-172.17.0.2-1731504888871:blk_1073741847_1030 to 127.0.0.1:33563 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:13,567 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4def23dd[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33931, datanodeUuid=8f99e9e2-208b-40da-abea-6371320ca5d9, infoPort=45215, infoSecurePort=0, ipcPort=43989, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871):Failed to transfer BP-1828668861-172.17.0.2-1731504888871:blk_1073741852_1035 to 127.0.0.1:43767 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
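The repeated split-policy lines at 13:35:13,151-152 above reduce to two checks: the store is larger than the configured check size (17.6 K > 16.0 K), but the candidate split point, the midkey of the largest file, equals the first or last row, so there is no usable point to split at. Below is a compressed sketch of that decision with invented names; the sizes echo the log and the row values are illustrative (only row0002 appears in the entries above, the last row is made up).

    public class ToySplitCheck {
        // Size check: is the total store size over the threshold?
        static boolean shouldSplit(long sumSizeBytes, long sizeToCheckBytes) {
            return sumSizeBytes > sizeToCheckBytes;
        }

        // Split-point check: the midkey is only usable if it differs from both the
        // first and the last row, otherwise one daughter region would be empty.
        static boolean canSplitAt(String midKey, String firstRow, String lastRow) {
            return !midKey.equals(firstRow) && !midKey.equals(lastRow);
        }

        public static void main(String[] args) {
            long sumSize = 18_022;      // roughly the 17.6 K store size after the compaction above
            long sizeToCheck = 16_384;  // 16.0 K
            String firstRow = "row0002", lastRow = "row0016", midKey = "row0002"; // illustrative rows
            System.out.println("should split by size: " + shouldSplit(sumSize, sizeToCheck));
            System.out.println("usable split point:   " + canSplitAt(midKey, firstRow, lastRow));
            System.out.println("split requested:      "
                + (shouldSplit(sumSize, sizeToCheck) && canSplitAt(midKey, firstRow, lastRow)));
        }
    }

As in the log, the size check passes but the split is not requested because the split point is unusable.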
2024-11-13T13:35:14,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33523 {}] regionserver.HRegion(8855): Flush requested on 791221fd56c9a0e65b7eca684d5a205f 2024-11-13T13:35:14,080 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 791221fd56c9a0e65b7eca684d5a205f 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-13T13:35:14,086 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/.tmp/info/5cdc0464f023414fba63e8698647c01e is 1079, key is tmprow/info:/1731504914079/Put/seqid=0 2024-11-13T13:35:14,088 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:14,089 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK], DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK]) is bad. 2024-11-13T13:35:14,089 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741868_1051 2024-11-13T13:35:14,089 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK] 2024-11-13T13:35:14,091 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
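The 13:35:14,080 entries above show a flush being requested once the region's memstore holds about 7.35 KB of cell data (8.13 KB of heap, i.e. data plus per-cell overhead). Below is a minimal accounting sketch under an assumed fixed per-cell overhead and a hypothetical flush threshold; the real accounting in HRegion and the memstore is considerably more involved, and every constant here is invented for illustration.

    public class ToyMemStoreAccounting {
        // Assumed, illustrative constants: a fixed per-cell heap overhead and a
        // small flush threshold so the example actually triggers.
        static final long CELL_HEAP_OVERHEAD = 48;
        static final long FLUSH_THRESHOLD_BYTES = 8 * 1024;

        long dataSize;   // sum of key+value bytes
        long heapSize;   // dataSize plus bookkeeping overhead

        // Account for one written cell and report whether a flush should be requested.
        boolean add(int keyLen, int valueLen) {
            long cellData = keyLen + valueLen;
            dataSize += cellData;
            heapSize += cellData + CELL_HEAP_OVERHEAD;
            return heapSize >= FLUSH_THRESHOLD_BYTES;
        }

        public static void main(String[] args) {
            ToyMemStoreAccounting memstore = new ToyMemStoreAccounting();
            boolean flushRequested = false;
            for (int i = 0; i < 8 && !flushRequested; i++) {
                // ~1 KB cells, roughly the "Len of the biggest cell ... is 1080" seen above
                flushRequested = memstore.add(40, 1040);
            }
            System.out.printf("dataSize=%d heapSize=%d flushRequested=%b%n",
                memstore.dataSize, memstore.heapSize, flushRequested);
        }
    }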
2024-11-13T13:35:14,091 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK], DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]) is bad. 2024-11-13T13:35:14,091 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741869_1052 2024-11-13T13:35:14,092 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK] 2024-11-13T13:35:14,094 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:14,094 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK], DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK]) is bad. 2024-11-13T13:35:14,094 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741870_1053 2024-11-13T13:35:14,095 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK] 2024-11-13T13:35:14,098 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52760 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741871_1054] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data6]'}, localName='127.0.0.1:33931', datanodeUuid='8f99e9e2-208b-40da-abea-6371320ca5d9', xmitsInProgress=0}:Exception transferring block BP-1828668861-172.17.0.2-1731504888871:blk_1073741871_1054 to mirror 127.0.0.1:35467 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:14,098 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35467 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:14,098 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK], DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]) is bad. 2024-11-13T13:35:14,098 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52760 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741871_1054] {}] datanode.BlockReceiver(316): Block 1073741871 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T13:35:14,099 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741871_1054 2024-11-13T13:35:14,099 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52760 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741871_1054] {}] datanode.DataXceiver(331): 127.0.0.1:33931:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52760 dst: /127.0.0.1:33931 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:14,099 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK] 2024-11-13T13:35:14,101 WARN [IPC Server handler 3 on default port 40141 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T13:35:14,101 WARN [IPC Server handler 3 on default port 40141 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T13:35:14,101 WARN [IPC Server handler 3 on default port 40141 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T13:35:14,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741872_1055 (size=6027) 2024-11-13T13:35:14,506 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/.tmp/info/5cdc0464f023414fba63e8698647c01e 2024-11-13T13:35:14,517 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/.tmp/info/5cdc0464f023414fba63e8698647c01e as hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/5cdc0464f023414fba63e8698647c01e 2024-11-13T13:35:14,525 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/5cdc0464f023414fba63e8698647c01e, entries=1, sequenceid=43, filesize=5.9 K 
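The IPC-server warnings at 13:35:14,101 above ("Failed to place enough replicas, still in need of 1 to reach 2") are the namenode-side counterpart of the client's exclusions: with four of the five datanodes unreachable, only one DISK target can be chosen for a replication-2 block. The sketch below is a bare-bones target chooser with invented names; the real BlockPlacementPolicyDefault also weighs racks, node load and storage types.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Set;

    public class ToyBlockPlacement {
        // Pick up to `replication` targets from the live nodes, skipping any node
        // the client asked to exclude; warn about the shortfall like the namenode does.
        static List<String> chooseTargets(List<String> liveNodes, Set<String> excluded, int replication) {
            List<String> targets = new ArrayList<>();
            for (String node : liveNodes) {
                if (targets.size() == replication) break;
                if (!excluded.contains(node)) targets.add(node);
            }
            if (targets.size() < replication) {
                System.out.printf("WARN Failed to place enough replicas, still in need of %d to reach %d%n",
                    replication - targets.size(), replication);
            }
            return targets;
        }

        public static void main(String[] args) {
            List<String> live = List.of("127.0.0.1:33931", "127.0.0.1:43767",
                                        "127.0.0.1:34283", "127.0.0.1:33563", "127.0.0.1:35467");
            Set<String> excluded = Set.of("127.0.0.1:43767", "127.0.0.1:34283",
                                          "127.0.0.1:33563", "127.0.0.1:35467");
            System.out.println("targets=" + chooseTargets(live, excluded, 2));
        }
    }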
2024-11-13T13:35:14,526 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 791221fd56c9a0e65b7eca684d5a205f in 446ms, sequenceid=43, compaction requested=false 2024-11-13T13:35:14,526 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 791221fd56c9a0e65b7eca684d5a205f: 2024-11-13T13:35:14,526 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-13T13:35:14,526 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T13:35:14,526 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/a1d06f447e274837ab65702cd1d4822f because midkey is the same as first or last row 2024-11-13T13:35:14,565 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4328d5c8[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33931, datanodeUuid=8f99e9e2-208b-40da-abea-6371320ca5d9, infoPort=45215, infoSecurePort=0, ipcPort=43989, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871):Failed to transfer BP-1828668861-172.17.0.2-1731504888871:blk_1073741842_1025 to 127.0.0.1:35467 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:14,565 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4def23dd[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33931, datanodeUuid=8f99e9e2-208b-40da-abea-6371320ca5d9, infoPort=45215, infoSecurePort=0, ipcPort=43989, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871):Failed to transfer BP-1828668861-172.17.0.2-1731504888871:blk_1073741857_1040 to 127.0.0.1:35467 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:14,615 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:14,908 WARN [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK]] 2024-11-13T13:35:14,908 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:14,908 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog bfeb2336aed7%2C33523%2C1731504891245:(num 1731504912881) roll requested 2024-11-13T13:35:14,909 INFO [regionserver/bfeb2336aed7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C33523%2C1731504891245.1731504914909 2024-11-13T13:35:14,913 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:14,914 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK], DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]) is bad. 2024-11-13T13:35:14,914 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741873_1056 2024-11-13T13:35:14,915 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK] 2024-11-13T13:35:14,916 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:14,916 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK], DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK]) is bad. 2024-11-13T13:35:14,916 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741874_1057 2024-11-13T13:35:14,917 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK] 2024-11-13T13:35:14,919 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:14,919 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK], DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK]) is bad. 2024-11-13T13:35:14,919 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741875_1058 2024-11-13T13:35:14,920 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK] 2024-11-13T13:35:14,922 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34283 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:14,922 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52786 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741876_1059] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data6]'}, localName='127.0.0.1:33931', datanodeUuid='8f99e9e2-208b-40da-abea-6371320ca5d9', xmitsInProgress=0}:Exception transferring block BP-1828668861-172.17.0.2-1731504888871:blk_1073741876_1059 to mirror 127.0.0.1:34283 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:14,922 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK], DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]) is bad. 2024-11-13T13:35:14,922 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741876_1059 2024-11-13T13:35:14,923 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52786 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741876_1059] {}] datanode.BlockReceiver(316): Block 1073741876 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-13T13:35:14,923 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52786 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741876_1059] {}] datanode.DataXceiver(331): 127.0.0.1:33931:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52786 dst: /127.0.0.1:33931 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
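The roll sequence that starts at 13:35:14,908 above ("Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL") shows the log roller reacting to a degraded pipeline rather than to file size: once the current WAL's pipeline drops below the minimum replica count, a roll is requested so a new file with a fresh pipeline can be opened. Below is a minimal sketch of that check with invented names and an assumed minimum of 2 replicas.

    import java.util.List;

    public class ToyWalRollCheck {
        // Assumed minimum replica count for this sketch (the test above expects "no less than 2").
        static final int MIN_REPLICATION = 2;

        // Return true when the WAL should be rolled because its pipeline is too short.
        static boolean shouldRollForLowReplication(List<String> currentPipeline) {
            if (currentPipeline.size() < MIN_REPLICATION) {
                System.out.printf(
                    "WARN HDFS pipeline error detected. Found %d replicas but expecting no less than %d replicas. Requesting close of WAL.%n",
                    currentPipeline.size(), MIN_REPLICATION);
                return true;
            }
            return false;
        }

        public static void main(String[] args) {
            // After the exclusions above, only the local datanode is left in the pipeline.
            List<String> pipeline = List.of("127.0.0.1:33931");
            System.out.println("roll requested: " + shouldRollForLowReplication(pipeline));
        }
    }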
2024-11-13T13:35:14,923 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK] 2024-11-13T13:35:14,924 WARN [IPC Server handler 3 on default port 40141 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T13:35:14,924 WARN [IPC Server handler 3 on default port 40141 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T13:35:14,924 WARN [IPC Server handler 3 on default port 40141 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T13:35:14,927 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:14,928 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:14,928 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:14,928 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:14,928 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:14,928 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504912881 with entries=14, filesize=12.95 KB; new WAL /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504914909 2024-11-13T13:35:14,929 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45215:45215)] 2024-11-13T13:35:14,929 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504891893 is not closed yet, will try archiving it next time 2024-11-13T13:35:14,929 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504912881 is not closed yet, will try archiving it next time 2024-11-13T13:35:14,930 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504908860 to 
hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/oldWALs/bfeb2336aed7%2C33523%2C1731504891245.1731504908860 2024-11-13T13:35:14,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741862_1045 (size=13268) 2024-11-13T13:35:15,333 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504891893 is not closed yet, will try archiving it next time 2024-11-13T13:35:15,361 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:15,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33523 {}] regionserver.HRegion(8855): Flush requested on 791221fd56c9a0e65b7eca684d5a205f 2024-11-13T13:35:15,517 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 791221fd56c9a0e65b7eca684d5a205f 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-13T13:35:15,525 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/.tmp/info/8b5a231fa4df40b6b2b6b051ad176320 is 1079, key is tmprow/info:/1731504915514/Put/seqid=0 2024-11-13T13:35:15,528 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:35:15,528 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK], DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]) is bad. 2024-11-13T13:35:15,528 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741878_1061 2024-11-13T13:35:15,529 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK] 2024-11-13T13:35:15,531 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35467 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:15,531 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52806 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741879_1062] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data6]'}, localName='127.0.0.1:33931', datanodeUuid='8f99e9e2-208b-40da-abea-6371320ca5d9', xmitsInProgress=0}:Exception transferring block BP-1828668861-172.17.0.2-1731504888871:blk_1073741879_1062 to mirror 127.0.0.1:35467 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T13:35:15,531 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK], DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]) is bad. 2024-11-13T13:35:15,531 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741879_1062 2024-11-13T13:35:15,531 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52806 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741879_1062] {}] datanode.BlockReceiver(316): Block 1073741879 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T13:35:15,531 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52806 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741879_1062] {}] datanode.DataXceiver(331): 127.0.0.1:33931:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52806 dst: /127.0.0.1:33931 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:15,531 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK] 2024-11-13T13:35:15,533 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33563 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:35:15,533 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52822 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741880_1063] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data6]'}, localName='127.0.0.1:33931', datanodeUuid='8f99e9e2-208b-40da-abea-6371320ca5d9', xmitsInProgress=0}:Exception transferring block BP-1828668861-172.17.0.2-1731504888871:blk_1073741880_1063 to mirror 127.0.0.1:33563 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:15,533 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK], DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK]) is bad. 2024-11-13T13:35:15,534 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741880_1063 2024-11-13T13:35:15,534 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52822 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741880_1063] {}] datanode.BlockReceiver(316): Block 1073741880 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T13:35:15,534 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52822 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741880_1063] {}] datanode.DataXceiver(331): 127.0.0.1:33931:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52822 dst: /127.0.0.1:33931 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:15,534 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK] 2024-11-13T13:35:15,535 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:15,535 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK], DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK]) is bad. 
2024-11-13T13:35:15,535 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741881_1064 2024-11-13T13:35:15,536 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK] 2024-11-13T13:35:15,536 WARN [IPC Server handler 4 on default port 40141 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T13:35:15,537 WARN [IPC Server handler 4 on default port 40141 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T13:35:15,537 WARN [IPC Server handler 4 on default port 40141 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T13:35:15,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741882_1065 (size=6027) 2024-11-13T13:35:15,940 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/.tmp/info/8b5a231fa4df40b6b2b6b051ad176320 2024-11-13T13:35:15,947 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/.tmp/info/8b5a231fa4df40b6b2b6b051ad176320 as hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/8b5a231fa4df40b6b2b6b051ad176320 2024-11-13T13:35:15,955 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/8b5a231fa4df40b6b2b6b051ad176320, entries=1, sequenceid=53, filesize=5.9 K 2024-11-13T13:35:15,957 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 791221fd56c9a0e65b7eca684d5a205f in 439ms, sequenceid=53, compaction requested=true 2024-11-13T13:35:15,957 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
791221fd56c9a0e65b7eca684d5a205f: 2024-11-13T13:35:15,957 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-13T13:35:15,957 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T13:35:15,957 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/a1d06f447e274837ab65702cd1d4822f because midkey is the same as first or last row 2024-11-13T13:35:15,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 791221fd56c9a0e65b7eca684d5a205f:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T13:35:15,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T13:35:15,958 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T13:35:15,960 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T13:35:15,960 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.HStore(1541): 791221fd56c9a0e65b7eca684d5a205f/info is initiating minor compaction (all files) 2024-11-13T13:35:15,960 INFO [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 791221fd56c9a0e65b7eca684d5a205f/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f. 
2024-11-13T13:35:15,960 INFO [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/a1d06f447e274837ab65702cd1d4822f, hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/5cdc0464f023414fba63e8698647c01e, hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/8b5a231fa4df40b6b2b6b051ad176320] into tmpdir=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/.tmp, totalSize=29.3 K 2024-11-13T13:35:15,961 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] compactions.Compactor(225): Compacting a1d06f447e274837ab65702cd1d4822f, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=32, earliestPutTs=1731504906985 2024-11-13T13:35:15,961 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5cdc0464f023414fba63e8698647c01e, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1731504914079 2024-11-13T13:35:15,962 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8b5a231fa4df40b6b2b6b051ad176320, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1731504915514 2024-11-13T13:35:15,980 INFO [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 791221fd56c9a0e65b7eca684d5a205f#info#compaction#24 average throughput is 12.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T13:35:15,980 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/.tmp/info/fc861952d2444f7eb8eced69f470ee91 is 1080, key is row0002/info:/1731504906985/Put/seqid=0 2024-11-13T13:35:15,982 WARN [Thread-970 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:35:15,982 WARN [Thread-970 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK], DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]) is bad. 2024-11-13T13:35:15,982 WARN [Thread-970 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741883_1066 2024-11-13T13:35:15,983 WARN [Thread-970 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK] 2024-11-13T13:35:15,984 WARN [Thread-970 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:15,984 WARN [Thread-970 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK], DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]) is bad. 2024-11-13T13:35:15,984 WARN [Thread-970 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741884_1067 2024-11-13T13:35:15,985 WARN [Thread-970 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK] 2024-11-13T13:35:15,987 WARN [Thread-970 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33563 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:35:15,987 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52842 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741885_1068] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data6]'}, localName='127.0.0.1:33931', datanodeUuid='8f99e9e2-208b-40da-abea-6371320ca5d9', xmitsInProgress=0}:Exception transferring block BP-1828668861-172.17.0.2-1731504888871:blk_1073741885_1068 to mirror 127.0.0.1:33563 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:15,987 WARN [Thread-970 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK], DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK]) is bad. 2024-11-13T13:35:15,987 WARN [Thread-970 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741885_1068 2024-11-13T13:35:15,987 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52842 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741885_1068] {}] datanode.BlockReceiver(316): Block 1073741885 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T13:35:15,987 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:52842 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741885_1068] {}] datanode.DataXceiver(331): 127.0.0.1:33931:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52842 dst: /127.0.0.1:33931 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:15,988 WARN [Thread-970 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK] 2024-11-13T13:35:15,989 WARN [Thread-970 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:15,989 WARN [Thread-970 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK], DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK]) is bad. 
2024-11-13T13:35:15,989 WARN [Thread-970 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741886_1069 2024-11-13T13:35:15,990 WARN [Thread-970 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43767,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK] 2024-11-13T13:35:15,991 WARN [IPC Server handler 3 on default port 40141 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T13:35:15,991 WARN [IPC Server handler 3 on default port 40141 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T13:35:15,991 WARN [IPC Server handler 3 on default port 40141 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T13:35:15,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741887_1070 (size=18097) 2024-11-13T13:35:16,441 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/.tmp/info/fc861952d2444f7eb8eced69f470ee91 as hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/fc861952d2444f7eb8eced69f470ee91 2024-11-13T13:35:16,449 INFO [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 791221fd56c9a0e65b7eca684d5a205f/info of 791221fd56c9a0e65b7eca684d5a205f into fc861952d2444f7eb8eced69f470ee91(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-13T13:35:16,449 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 791221fd56c9a0e65b7eca684d5a205f: 2024-11-13T13:35:16,449 INFO [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f., storeName=791221fd56c9a0e65b7eca684d5a205f/info, priority=13, startTime=1731504915957; duration=0sec 2024-11-13T13:35:16,449 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-13T13:35:16,449 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T13:35:16,449 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/fc861952d2444f7eb8eced69f470ee91 because midkey is the same as first or last row 2024-11-13T13:35:16,449 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-13T13:35:16,449 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T13:35:16,449 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/fc861952d2444f7eb8eced69f470ee91 because midkey is the same as first or last row 2024-11-13T13:35:16,450 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-13T13:35:16,450 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T13:35:16,450 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/fc861952d2444f7eb8eced69f470ee91 because midkey is the same as first or last row 2024-11-13T13:35:16,450 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T13:35:16,450 DEBUG [RS:0;bfeb2336aed7:33523-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 791221fd56c9a0e65b7eca684d5a205f:info 2024-11-13T13:35:16,567 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4def23dd[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33931, datanodeUuid=8f99e9e2-208b-40da-abea-6371320ca5d9, infoPort=45215, infoSecurePort=0, ipcPort=43989, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871):Failed to transfer 
BP-1828668861-172.17.0.2-1731504888871:blk_1073741867_1050 to 127.0.0.1:34283 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:16,567 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4328d5c8[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33931, datanodeUuid=8f99e9e2-208b-40da-abea-6371320ca5d9, infoPort=45215, infoSecurePort=0, ipcPort=43989, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871):Failed to transfer BP-1828668861-172.17.0.2-1731504888871:blk_1073741872_1055 to 127.0.0.1:43767 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:16,616 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:16,930 WARN [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-13T13:35:16,930 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. 
Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:16,954 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:35:16,958 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T13:35:16,959 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T13:35:16,959 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T13:35:16,959 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T13:35:16,959 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6842affb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/hadoop.log.dir/,AVAILABLE} 2024-11-13T13:35:16,960 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1542e930{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T13:35:17,059 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@561386dd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/java.io.tmpdir/jetty-localhost-34933-hadoop-hdfs-3_4_1-tests_jar-_-any-1009693018549535554/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:35:17,060 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c8ca2dc{HTTP/1.1, (http/1.1)}{localhost:34933} 2024-11-13T13:35:17,060 INFO [Time-limited test {}] server.Server(415): Started @131747ms 2024-11-13T13:35:17,061 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T13:35:17,361 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:17,475 WARN [Thread-989 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T13:35:17,485 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x45f2dd78c4742bbc with lease ID 0x2515494a6e271d78: from storage DS-077399b2-6c0a-44a1-87c0-773de642c11b node DatanodeRegistration(127.0.0.1:33149, datanodeUuid=e06bd827-5889-475e-8fba-56aa016f4c7b, infoPort=41381, infoSecurePort=0, ipcPort=44057, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:35:17,485 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x45f2dd78c4742bbc with lease ID 0x2515494a6e271d78: from storage DS-a47574ed-07ff-4670-8b38-e596d474dbf4 node DatanodeRegistration(127.0.0.1:33149, datanodeUuid=e06bd827-5889-475e-8fba-56aa016f4c7b, infoPort=41381, infoSecurePort=0, ipcPort=44057, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-13T13:35:17,566 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4def23dd[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33931, datanodeUuid=8f99e9e2-208b-40da-abea-6371320ca5d9, infoPort=45215, infoSecurePort=0, ipcPort=43989, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871):Failed to transfer BP-1828668861-172.17.0.2-1731504888871:blk_1073741882_1065 to 127.0.0.1:34283 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:17,566 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4328d5c8[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33931, datanodeUuid=8f99e9e2-208b-40da-abea-6371320ca5d9, infoPort=45215, infoSecurePort=0, ipcPort=43989, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871):Failed to transfer BP-1828668861-172.17.0.2-1731504888871:blk_1073741862_1045 to 127.0.0.1:35467 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:18,616 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:18,930 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:19,362 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:35:19,569 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4328d5c8[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33931, datanodeUuid=8f99e9e2-208b-40da-abea-6371320ca5d9, infoPort=45215, infoSecurePort=0, ipcPort=43989, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871):Failed to transfer BP-1828668861-172.17.0.2-1731504888871:blk_1073741887_1070 to 127.0.0.1:33563 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:20,617 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:20,931 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:21,050 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T13:35:21,362 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:21,608 ERROR [FSHLog-0-hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData-prefix:bfeb2336aed7,39113,1731504891075 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:21,608 WARN [FSHLog-0-hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData-prefix:bfeb2336aed7,39113,1731504891075 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:21,609 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog bfeb2336aed7%2C39113%2C1731504891075:(num 1731504891382) roll requested 2024-11-13T13:35:21,609 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C39113%2C1731504891075.1731504921609 2024-11-13T13:35:21,614 WARN [Thread-1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1071 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:21,614 WARN [Thread-1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741888_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK], DatanodeInfoWithStorage[127.0.0.1:33149,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]) is bad. 2024-11-13T13:35:21,614 WARN [Thread-1009 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741888_1071 2024-11-13T13:35:21,615 WARN [Thread-1009 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK] 2024-11-13T13:35:21,617 WARN [Thread-1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1072 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:21,618 WARN [Thread-1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741889_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK], DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]) is bad. 2024-11-13T13:35:21,618 WARN [Thread-1009 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741889_1072 2024-11-13T13:35:21,619 WARN [Thread-1009 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK] 2024-11-13T13:35:21,621 WARN [Thread-1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1073 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:21,621 WARN [Thread-1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741890_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK], DatanodeInfoWithStorage[127.0.0.1:33149,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK]) is bad. 2024-11-13T13:35:21,621 WARN [Thread-1009 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741890_1073 2024-11-13T13:35:21,622 WARN [Thread-1009 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33563,DS-1c4da21f-127f-48b1-b5b4-ef967b026978,DISK] 2024-11-13T13:35:21,627 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:21,627 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:21,627 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:21,628 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:21,628 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:21,628 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/WALs/bfeb2336aed7,39113,1731504891075/bfeb2336aed7%2C39113%2C1731504891075.1731504891382 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/WALs/bfeb2336aed7,39113,1731504891075/bfeb2336aed7%2C39113%2C1731504891075.1731504921609 2024-11-13T13:35:21,628 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:21,629 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:21,629 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/WALs/bfeb2336aed7,39113,1731504891075/bfeb2336aed7%2C39113%2C1731504891075.1731504891382 2024-11-13T13:35:21,629 WARN [IPC Server handler 4 on default port 40141 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/WALs/bfeb2336aed7,39113,1731504891075/bfeb2336aed7%2C39113%2C1731504891075.1731504891382 has not been closed. Lease recovery is in progress. RecoveryId = 1075 for block blk_1073741830_1006 2024-11-13T13:35:21,630 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/WALs/bfeb2336aed7,39113,1731504891075/bfeb2336aed7%2C39113%2C1731504891075.1731504891382 after 1ms 2024-11-13T13:35:21,630 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41381:41381),(127.0.0.1/127.0.0.1:45215:45215)] 2024-11-13T13:35:21,630 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/WALs/bfeb2336aed7,39113,1731504891075/bfeb2336aed7%2C39113%2C1731504891075.1731504891382 is not closed yet, will try archiving it next time 2024-11-13T13:35:22,617 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:22,931 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:35:24,618 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:24,932 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:25,632 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/WALs/bfeb2336aed7,39113,1731504891075/bfeb2336aed7%2C39113%2C1731504891075.1731504891382 after 4003ms 2024-11-13T13:35:26,618 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:26,932 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:27,501 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@a66577b {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1828668861-172.17.0.2-1731504888871:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:35467,null,null]) java.net.ConnectException: Call From bfeb2336aed7/172.17.0.2 to localhost:38563 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-13T13:35:27,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741833_1019 (size=455) 2024-11-13T13:35:27,884 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504891893 to hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/oldWALs/bfeb2336aed7%2C33523%2C1731504891245.1731504891893 2024-11-13T13:35:27,888 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504912881 to hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/oldWALs/bfeb2336aed7%2C33523%2C1731504891245.1731504912881 2024-11-13T13:35:28,618 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:28,933 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:29,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741833_1019 (size=455) 2024-11-13T13:35:30,619 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:30,737 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C33523%2C1731504891245.1731504930736 2024-11-13T13:35:30,744 WARN [Thread-1020 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1076 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:30,744 WARN [Thread-1020 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741892_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK], DatanodeInfoWithStorage[127.0.0.1:33149,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]) is bad. 
2024-11-13T13:35:30,744 WARN [Thread-1020 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741892_1076 2024-11-13T13:35:30,746 WARN [Thread-1020 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK] 2024-11-13T13:35:30,751 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:30,751 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:30,751 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:30,751 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:30,751 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:30,751 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504914909 with entries=12, filesize=11.46 KB; new WAL /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504930736 2024-11-13T13:35:30,752 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41381:41381),(127.0.0.1/127.0.0.1:45215:45215)] 2024-11-13T13:35:30,752 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504914909 is not closed yet, will try archiving it next time 2024-11-13T13:35:30,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741877_1060 (size=11743) 2024-11-13T13:35:30,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33523 {}] regionserver.HRegion(8855): Flush requested on 791221fd56c9a0e65b7eca684d5a205f 2024-11-13T13:35:30,755 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 791221fd56c9a0e65b7eca684d5a205f 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-13T13:35:30,759 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/.tmp/info/0c7fc50e456d49f78fe32b5ef08b8ac7 is 1080, key is row0013/info:/1731504930754/Put/seqid=0 2024-11-13T13:35:30,762 WARN [Thread-1026 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1078 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34283 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:35:30,762 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:59978 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741894_1078] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data6]'}, localName='127.0.0.1:33931', datanodeUuid='8f99e9e2-208b-40da-abea-6371320ca5d9', xmitsInProgress=0}:Exception transferring block BP-1828668861-172.17.0.2-1731504888871:blk_1073741894_1078 to mirror 127.0.0.1:34283 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:30,762 WARN [Thread-1026 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741894_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK], DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]) is bad. 2024-11-13T13:35:30,762 WARN [Thread-1026 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741894_1078 2024-11-13T13:35:30,762 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:59978 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741894_1078] {}] datanode.BlockReceiver(316): Block 1073741894 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T13:35:30,763 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:59978 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741894_1078] {}] datanode.DataXceiver(331): 127.0.0.1:33931:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59978 dst: /127.0.0.1:33931 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:30,763 WARN [Thread-1026 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK] 2024-11-13T13:35:30,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741895_1079 (size=7109) 2024-11-13T13:35:30,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741895_1079 (size=7109) 2024-11-13T13:35:30,769 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=64 (bloomFilter=true), to=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/.tmp/info/0c7fc50e456d49f78fe32b5ef08b8ac7 2024-11-13T13:35:30,779 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/.tmp/info/0c7fc50e456d49f78fe32b5ef08b8ac7 as hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/0c7fc50e456d49f78fe32b5ef08b8ac7 2024-11-13T13:35:30,786 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/0c7fc50e456d49f78fe32b5ef08b8ac7, entries=2, sequenceid=64, filesize=6.9 K 2024-11-13T13:35:30,787 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7526, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10759 for 791221fd56c9a0e65b7eca684d5a205f in 32ms, sequenceid=64, compaction requested=false 2024-11-13T13:35:30,787 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 791221fd56c9a0e65b7eca684d5a205f: 2024-11-13T13:35:30,787 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.6 K, sizeToCheck=16.0 K 2024-11-13T13:35:30,788 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T13:35:30,788 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/fc861952d2444f7eb8eced69f470ee91 because midkey is the same as first or last row 2024-11-13T13:35:30,933 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(556): 
LowReplication-Roller was enabled. 2024-11-13T13:35:30,933 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:30,979 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T13:35:30,979 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-13T13:35:30,980 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at 
org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T13:35:30,980 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:35:30,981 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:35:30,981 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T13:35:30,981 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-13T13:35:30,981 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=962798992, stopped=false 2024-11-13T13:35:30,981 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=bfeb2336aed7,39113,1731504891075 2024-11-13T13:35:31,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33523-0x1013467e0590001, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T13:35:31,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T13:35:31,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38781-0x1013467e0590002, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T13:35:31,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33523-0x1013467e0590001, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:35:31,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:35:31,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38781-0x1013467e0590002, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:35:31,051 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T13:35:31,052 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38781-0x1013467e0590002, quorum=127.0.0.1:56840, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T13:35:31,053 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33523-0x1013467e0590001, quorum=127.0.0.1:56840, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T13:35:31,053 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T13:35:31,053 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-13T13:35:31,053 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) 
at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T13:35:31,053 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:35:31,054 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'bfeb2336aed7,33523,1731504891245' ***** 2024-11-13T13:35:31,054 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T13:35:31,054 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'bfeb2336aed7,38781,1731504892507' ***** 2024-11-13T13:35:31,054 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T13:35:31,054 INFO [RS:0;bfeb2336aed7:33523 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T13:35:31,054 INFO [RS:0;bfeb2336aed7:33523 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T13:35:31,054 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T13:35:31,054 INFO [RS:0;bfeb2336aed7:33523 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T13:35:31,055 INFO [RS:0;bfeb2336aed7:33523 {}] regionserver.HRegionServer(3091): Received CLOSE for 791221fd56c9a0e65b7eca684d5a205f 2024-11-13T13:35:31,055 INFO [RS:1;bfeb2336aed7:38781 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T13:35:31,055 INFO [RS:1;bfeb2336aed7:38781 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T13:35:31,055 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T13:35:31,055 INFO [RS:1;bfeb2336aed7:38781 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T13:35:31,055 INFO [RS:1;bfeb2336aed7:38781 {}] regionserver.HRegionServer(959): stopping server bfeb2336aed7,38781,1731504892507 2024-11-13T13:35:31,055 INFO [RS:0;bfeb2336aed7:33523 {}] regionserver.HRegionServer(959): stopping server bfeb2336aed7,33523,1731504891245 2024-11-13T13:35:31,055 INFO [RS:0;bfeb2336aed7:33523 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T13:35:31,055 INFO [RS:1;bfeb2336aed7:38781 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T13:35:31,055 INFO [RS:0;bfeb2336aed7:33523 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;bfeb2336aed7:33523. 2024-11-13T13:35:31,055 INFO [RS:1;bfeb2336aed7:38781 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;bfeb2336aed7:38781. 
2024-11-13T13:35:31,056 DEBUG [RS:0;bfeb2336aed7:33523 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T13:35:31,056 DEBUG [RS:1;bfeb2336aed7:38781 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T13:35:31,055 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 791221fd56c9a0e65b7eca684d5a205f, disabling compactions & flushes 2024-11-13T13:35:31,056 DEBUG [RS:1;bfeb2336aed7:38781 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:35:31,056 DEBUG [RS:0;bfeb2336aed7:33523 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:35:31,056 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f. 
2024-11-13T13:35:31,056 INFO [RS:1;bfeb2336aed7:38781 {}] regionserver.HRegionServer(976): stopping server bfeb2336aed7,38781,1731504892507; all regions closed. 2024-11-13T13:35:31,056 INFO [RS:0;bfeb2336aed7:33523 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T13:35:31,056 INFO [RS:0;bfeb2336aed7:33523 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T13:35:31,056 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f. 2024-11-13T13:35:31,056 INFO [RS:0;bfeb2336aed7:33523 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-13T13:35:31,056 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f. after waiting 0 ms 2024-11-13T13:35:31,056 INFO [RS:0;bfeb2336aed7:33523 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T13:35:31,056 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f. 2024-11-13T13:35:31,057 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 791221fd56c9a0e65b7eca684d5a205f 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-13T13:35:31,057 INFO [RS:0;bfeb2336aed7:33523 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-13T13:35:31,057 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:31,057 DEBUG [RS:0;bfeb2336aed7:33523 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 791221fd56c9a0e65b7eca684d5a205f=TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f.} 2024-11-13T13:35:31,057 DEBUG [RS:0;bfeb2336aed7:33523 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 791221fd56c9a0e65b7eca684d5a205f 2024-11-13T13:35:31,057 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:31,057 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T13:35:31,057 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T13:35:31,057 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:31,057 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:31,057 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:31,057 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T13:35:31,057 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T13:35:31,058 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates 
disabled for region hbase:meta,,1.1588230740 2024-11-13T13:35:31,058 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-13T13:35:31,058 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:31,058 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:31,058 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 2024-11-13T13:35:31,058 ERROR [FSHLog-0-hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239-prefix:bfeb2336aed7,33523,1731504891245.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:35:31,059 WARN [FSHLog-0-hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239-prefix:bfeb2336aed7,33523,1731504891245.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:31,059 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog bfeb2336aed7%2C33523%2C1731504891245.meta:.meta(num 1731504892258) roll requested 2024-11-13T13:35:31,059 WARN [IPC Server handler 4 on default port 40141 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 has not been closed. Lease recovery is in progress. RecoveryId = 1080 for block blk_1073741837_1013 2024-11-13T13:35:31,059 INFO [regionserver/bfeb2336aed7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C33523%2C1731504891245.meta.1731504931059.meta 2024-11-13T13:35:31,059 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 after 1ms 2024-11-13T13:35:31,061 WARN [Thread-1035 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741896_1081 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:31,062 WARN [Thread-1035 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741896_1081 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK], DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]) is bad. 
2024-11-13T13:35:31,062 WARN [Thread-1035 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741896_1081 2024-11-13T13:35:31,062 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/.tmp/info/2f971f7566eb46ebb13b431b57c1d2fc is 1080, key is row0014/info:/1731504930756/Put/seqid=0 2024-11-13T13:35:31,062 WARN [Thread-1035 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK] 2024-11-13T13:35:31,065 WARN [Thread-1036 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1083 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34283 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:31,065 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:60010 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741898_1083] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data6]'}, localName='127.0.0.1:33931', datanodeUuid='8f99e9e2-208b-40da-abea-6371320ca5d9', xmitsInProgress=0}:Exception transferring block BP-1828668861-172.17.0.2-1731504888871:blk_1073741898_1083 to mirror 127.0.0.1:34283 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T13:35:31,065 WARN [Thread-1036 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741898_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33931,DS-015271de-8eac-4900-8d07-4f9567b2b004,DISK], DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]) is bad. 2024-11-13T13:35:31,065 WARN [Thread-1036 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741898_1083 2024-11-13T13:35:31,065 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:60010 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741898_1083] {}] datanode.BlockReceiver(316): Block 1073741898 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T13:35:31,065 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:60010 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741898_1083] {}] datanode.DataXceiver(331): 127.0.0.1:33931:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60010 dst: /127.0.0.1:33931 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:31,066 WARN [Thread-1036 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK] 2024-11-13T13:35:31,070 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:31,071 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:31,071 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:31,071 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:31,071 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:31,071 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504931059.meta 2024-11-13T13:35:31,072 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:31,073 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35467,DS-b26c0838-0080-4619-916b-50cda0a016e8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:31,073 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta 2024-11-13T13:35:31,073 WARN [IPC Server handler 0 on default port 40141 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1085 for block blk_1073741834_1010 2024-11-13T13:35:31,073 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta after 0ms 2024-11-13T13:35:31,073 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41381:41381),(127.0.0.1/127.0.0.1:45215:45215)] 2024-11-13T13:35:31,073 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta is not closed yet, will try archiving it next time 2024-11-13T13:35:31,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741899_1084 (size=15737) 2024-11-13T13:35:31,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741899_1084 (size=15737) 2024-11-13T13:35:31,075 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/.tmp/info/2f971f7566eb46ebb13b431b57c1d2fc 2024-11-13T13:35:31,081 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/.tmp/info/2f971f7566eb46ebb13b431b57c1d2fc as hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/2f971f7566eb46ebb13b431b57c1d2fc 2024-11-13T13:35:31,087 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/2f971f7566eb46ebb13b431b57c1d2fc, entries=10, sequenceid=77, filesize=15.4 K 2024-11-13T13:35:31,088 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10759, heapSize ~11.48 KB/11760, currentSize=0 B/0 for 791221fd56c9a0e65b7eca684d5a205f in 32ms, sequenceid=77, compaction requested=true 2024-11-13T13:35:31,089 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/a89abbf15cac4cad9521dc2989663b30, 
hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/0b910b2348c64a5691f6ec9319489b93, hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/a1d06f447e274837ab65702cd1d4822f, hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/752738c2a8c6483eb24ba5fde94235bc, hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/5cdc0464f023414fba63e8698647c01e, hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/8b5a231fa4df40b6b2b6b051ad176320] to archive 2024-11-13T13:35:31,090 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-13T13:35:31,091 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/hbase/meta/1588230740/.tmp/info/a5553e90c11643bc80134dbce92a01cf is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f./info:regioninfo/1731504893021/Put/seqid=0 2024-11-13T13:35:31,092 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/a89abbf15cac4cad9521dc2989663b30 to hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/a89abbf15cac4cad9521dc2989663b30 2024-11-13T13:35:31,094 WARN [Thread-1048 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1086 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34283 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:35:31,094 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:40014 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741900_1086] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data4]'}, localName='127.0.0.1:33149', datanodeUuid='e06bd827-5889-475e-8fba-56aa016f4c7b', xmitsInProgress=0}:Exception transferring block BP-1828668861-172.17.0.2-1731504888871:blk_1073741900_1086 to mirror 127.0.0.1:34283 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:31,094 WARN [Thread-1048 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741900_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33149,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK], DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]) is bad. 2024-11-13T13:35:31,094 WARN [Thread-1048 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741900_1086 2024-11-13T13:35:31,094 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:40014 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741900_1086] {}] datanode.BlockReceiver(316): Block 1073741900 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T13:35:31,094 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:40014 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741900_1086] {}] datanode.DataXceiver(331): 127.0.0.1:33149:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40014 dst: /127.0.0.1:33149 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:31,095 WARN [Thread-1048 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK] 2024-11-13T13:35:31,100 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/0b910b2348c64a5691f6ec9319489b93 to hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/0b910b2348c64a5691f6ec9319489b93 2024-11-13T13:35:31,102 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/a1d06f447e274837ab65702cd1d4822f to hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/a1d06f447e274837ab65702cd1d4822f 2024-11-13T13:35:31,104 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/752738c2a8c6483eb24ba5fde94235bc to hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/752738c2a8c6483eb24ba5fde94235bc 2024-11-13T13:35:31,106 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/5cdc0464f023414fba63e8698647c01e to hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/5cdc0464f023414fba63e8698647c01e 2024-11-13T13:35:31,107 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/8b5a231fa4df40b6b2b6b051ad176320 to hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/info/8b5a231fa4df40b6b2b6b051ad176320 2024-11-13T13:35:31,108 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=bfeb2336aed7:39113 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-13T13:35:31,108 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [a89abbf15cac4cad9521dc2989663b30=10347, 0b910b2348c64a5691f6ec9319489b93=12506, a1d06f447e274837ab65702cd1d4822f=17994, 752738c2a8c6483eb24ba5fde94235bc=6027, 5cdc0464f023414fba63e8698647c01e=6027, 8b5a231fa4df40b6b2b6b051ad176320=6027] 2024-11-13T13:35:31,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741901_1087 (size=7089) 2024-11-13T13:35:31,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741901_1087 (size=7089) 2024-11-13T13:35:31,112 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/hbase/meta/1588230740/.tmp/info/a5553e90c11643bc80134dbce92a01cf 2024-11-13T13:35:31,114 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/default/TestLogRolling-testLogRollOnDatanodeDeath/791221fd56c9a0e65b7eca684d5a205f/recovered.edits/80.seqid, newMaxSeqId=80, maxSeqId=1 2024-11-13T13:35:31,115 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f. 
2024-11-13T13:35:31,115 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 791221fd56c9a0e65b7eca684d5a205f: Waiting for close lock at 1731504931055Running coprocessor pre-close hooks at 1731504931055Disabling compacts and flushes for region at 1731504931055Disabling writes for close at 1731504931056 (+1 ms)Obtaining lock to block concurrent updates at 1731504931057 (+1 ms)Preparing flush snapshotting stores in 791221fd56c9a0e65b7eca684d5a205f at 1731504931057Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f., syncing WAL and waiting on mvcc, flushsize=dataSize=10759, getHeapSize=11760, getOffHeapSize=0, getCellsCount=10 at 1731504931057Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f. at 1731504931059 (+2 ms)Flushing 791221fd56c9a0e65b7eca684d5a205f/info: creating writer at 1731504931059Flushing 791221fd56c9a0e65b7eca684d5a205f/info: appending metadata at 1731504931062 (+3 ms)Flushing 791221fd56c9a0e65b7eca684d5a205f/info: closing flushed file at 1731504931062Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@37603f1d: reopening flushed file at 1731504931080 (+18 ms)Finished flush of dataSize ~10.51 KB/10759, heapSize ~11.48 KB/11760, currentSize=0 B/0 for 791221fd56c9a0e65b7eca684d5a205f in 32ms, sequenceid=77, compaction requested=true at 1731504931088 (+8 ms)Writing region close event to WAL at 1731504931109 (+21 ms)Running coprocessor post-close hooks at 1731504931114 (+5 ms)Closed at 1731504931115 (+1 ms) 2024-11-13T13:35:31,115 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731504892646.791221fd56c9a0e65b7eca684d5a205f. 2024-11-13T13:35:31,130 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/hbase/meta/1588230740/.tmp/ns/ee0758a877d146bc9bd2db153ce10f83 is 43, key is default/ns:d/1731504892390/Put/seqid=0 2024-11-13T13:35:31,133 WARN [Thread-1056 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741902_1088 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34283 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:35:31,133 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:40030 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741902_1088] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data4]'}, localName='127.0.0.1:33149', datanodeUuid='e06bd827-5889-475e-8fba-56aa016f4c7b', xmitsInProgress=0}:Exception transferring block BP-1828668861-172.17.0.2-1731504888871:blk_1073741902_1088 to mirror 127.0.0.1:34283 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:31,133 WARN [Thread-1056 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1828668861-172.17.0.2-1731504888871:blk_1073741902_1088 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33149,DS-077399b2-6c0a-44a1-87c0-773de642c11b,DISK], DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK]) is bad. 2024-11-13T13:35:31,133 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:40030 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741902_1088] {}] datanode.BlockReceiver(316): Block 1073741902 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T13:35:31,133 WARN [Thread-1056 {}] hdfs.DataStreamer(1850): Abandoning BP-1828668861-172.17.0.2-1731504888871:blk_1073741902_1088 2024-11-13T13:35:31,133 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1667689267_22 at /127.0.0.1:40030 [Receiving block BP-1828668861-172.17.0.2-1731504888871:blk_1073741902_1088] {}] datanode.DataXceiver(331): 127.0.0.1:33149:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40030 dst: /127.0.0.1:33149 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:31,134 WARN [Thread-1056 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34283,DS-96fcb019-1c76-495f-b5d6-3eab33ac87a7,DISK] 2024-11-13T13:35:31,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741903_1089 (size=5153) 2024-11-13T13:35:31,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741903_1089 (size=5153) 2024-11-13T13:35:31,139 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/hbase/meta/1588230740/.tmp/ns/ee0758a877d146bc9bd2db153ce10f83 2024-11-13T13:35:31,154 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.1731504914909 to hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/oldWALs/bfeb2336aed7%2C33523%2C1731504891245.1731504914909 2024-11-13T13:35:31,163 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/hbase/meta/1588230740/.tmp/table/8f4548f5551c45d19c3820f581075785 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731504893033/Put/seqid=0 2024-11-13T13:35:31,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741904_1090 (size=5424) 2024-11-13T13:35:31,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741904_1090 (size=5424) 2024-11-13T13:35:31,169 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/hbase/meta/1588230740/.tmp/table/8f4548f5551c45d19c3820f581075785 2024-11-13T13:35:31,176 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/hbase/meta/1588230740/.tmp/info/a5553e90c11643bc80134dbce92a01cf as hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/hbase/meta/1588230740/info/a5553e90c11643bc80134dbce92a01cf 2024-11-13T13:35:31,182 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/hbase/meta/1588230740/info/a5553e90c11643bc80134dbce92a01cf, entries=10, sequenceid=11, filesize=6.9 K 2024-11-13T13:35:31,183 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/hbase/meta/1588230740/.tmp/ns/ee0758a877d146bc9bd2db153ce10f83 as hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/hbase/meta/1588230740/ns/ee0758a877d146bc9bd2db153ce10f83 2024-11-13T13:35:31,188 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/hbase/meta/1588230740/ns/ee0758a877d146bc9bd2db153ce10f83, entries=2, sequenceid=11, filesize=5.0 K 2024-11-13T13:35:31,189 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/hbase/meta/1588230740/.tmp/table/8f4548f5551c45d19c3820f581075785 as hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/hbase/meta/1588230740/table/8f4548f5551c45d19c3820f581075785 2024-11-13T13:35:31,195 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/hbase/meta/1588230740/table/8f4548f5551c45d19c3820f581075785, entries=2, sequenceid=11, filesize=5.3 K 2024-11-13T13:35:31,196 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 138ms, sequenceid=11, compaction requested=false 2024-11-13T13:35:31,201 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-13T13:35:31,202 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T13:35:31,202 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T13:35:31,202 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731504931057Running coprocessor pre-close hooks at 1731504931057Disabling compacts and flushes for region at 1731504931057Disabling writes for close at 1731504931058 (+1 ms)Obtaining lock to block concurrent updates at 1731504931058Preparing flush snapshotting stores in 1588230740 at 1731504931058Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731504931058Flushing stores of 
hbase:meta,,1.1588230740 at 1731504931074 (+16 ms)Flushing 1588230740/info: creating writer at 1731504931074Flushing 1588230740/info: appending metadata at 1731504931090 (+16 ms)Flushing 1588230740/info: closing flushed file at 1731504931090Flushing 1588230740/ns: creating writer at 1731504931117 (+27 ms)Flushing 1588230740/ns: appending metadata at 1731504931130 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1731504931130Flushing 1588230740/table: creating writer at 1731504931145 (+15 ms)Flushing 1588230740/table: appending metadata at 1731504931162 (+17 ms)Flushing 1588230740/table: closing flushed file at 1731504931163 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@294c99c4: reopening flushed file at 1731504931175 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@10c9d1db: reopening flushed file at 1731504931182 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2066f9a6: reopening flushed file at 1731504931188 (+6 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 138ms, sequenceid=11, compaction requested=false at 1731504931196 (+8 ms)Writing region close event to WAL at 1731504931197 (+1 ms)Running coprocessor post-close hooks at 1731504931202 (+5 ms)Closed at 1731504931202 2024-11-13T13:35:31,202 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T13:35:31,257 INFO [RS:0;bfeb2336aed7:33523 {}] regionserver.HRegionServer(976): stopping server bfeb2336aed7,33523,1731504891245; all regions closed. 2024-11-13T13:35:31,258 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:31,258 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:31,258 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:31,258 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:31,258 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:31,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741897_1082 (size=825) 2024-11-13T13:35:31,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741897_1082 (size=825) 2024-11-13T13:35:31,613 INFO [regionserver/bfeb2336aed7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-13T13:35:31,613 INFO [regionserver/bfeb2336aed7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-13T13:35:31,760 INFO [regionserver/bfeb2336aed7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T13:35:31,775 INFO [regionserver/bfeb2336aed7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-13T13:35:31,775 INFO [regionserver/bfeb2336aed7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-13T13:35:31,948 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-13T13:35:31,948 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: 
RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T13:35:31,949 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T13:35:32,429 INFO [master/bfeb2336aed7:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-13T13:35:32,429 INFO [master/bfeb2336aed7:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-13T13:35:32,479 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3b265c4a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33149, datanodeUuid=e06bd827-5889-475e-8fba-56aa016f4c7b, infoPort=41381, infoSecurePort=0, ipcPort=44057, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871):Failed to transfer BP-1828668861-172.17.0.2-1731504888871:blk_1073741836_1012 to 127.0.0.1:34283 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:32,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741832_1008 (size=32) 2024-11-13T13:35:32,616 INFO [regionserver/bfeb2336aed7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T13:35:33,480 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6a49a834[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33149, datanodeUuid=e06bd827-5889-475e-8fba-56aa016f4c7b, infoPort=41381, infoSecurePort=0, ipcPort=44057, storageInfo=lv=-57;cid=testClusterID;nsid=373864911;c=1731504888871):Failed to transfer BP-1828668861-172.17.0.2-1731504888871:blk_1073741828_1004 to 127.0.0.1:34283 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:33,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741826_1002 (size=42) 2024-11-13T13:35:34,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741877_1060 (size=11743) 2024-11-13T13:35:35,061 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 after 4003ms 2024-11-13T13:35:35,074 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta after 4001ms 2024-11-13T13:35:35,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741829_1005 (size=34) 2024-11-13T13:35:35,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741827_1003 (size=196) 2024-11-13T13:35:36,058 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-13T13:35:36,062 DEBUG [RS:1;bfeb2336aed7:38781 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/oldWALs 2024-11-13T13:35:36,062 INFO [RS:1;bfeb2336aed7:38781 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bfeb2336aed7%2C38781%2C1731504892507:(num 1731504892747) 2024-11-13T13:35:36,062 DEBUG [RS:1;bfeb2336aed7:38781 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:35:36,062 INFO [RS:1;bfeb2336aed7:38781 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T13:35:36,063 INFO [RS:1;bfeb2336aed7:38781 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T13:35:36,063 INFO [RS:1;bfeb2336aed7:38781 {}] hbase.ChoreService(370): Chore service for: regionserver/bfeb2336aed7:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-13T13:35:36,064 INFO [RS:1;bfeb2336aed7:38781 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T13:35:36,064 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T13:35:36,064 INFO [RS:1;bfeb2336aed7:38781 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T13:35:36,064 INFO [RS:1;bfeb2336aed7:38781 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-13T13:35:36,064 INFO [RS:1;bfeb2336aed7:38781 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T13:35:36,064 INFO [RS:1;bfeb2336aed7:38781 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38781 2024-11-13T13:35:36,069 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at 
jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:36,116 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T13:35:36,133 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38781-0x1013467e0590002, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bfeb2336aed7,38781,1731504892507
2024-11-13T13:35:36,133 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-13T13:35:36,133 INFO [RS:1;bfeb2336aed7:38781 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-13T13:35:36,137 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T13:35:36,137 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T13:35:36,138 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T13:35:36,138 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T13:35:36,138 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T13:35:36,146 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bfeb2336aed7,38781,1731504892507]
2024-11-13T13:35:36,156 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T13:35:36,157 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T13:35:36,157 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/bfeb2336aed7,38781,1731504892507 already deleted, retry=false
2024-11-13T13:35:36,157 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; bfeb2336aed7,38781,1731504892507 expired; onlineServers=1
2024-11-13T13:35:36,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38781-0x1013467e0590002, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-13T13:35:36,247 INFO [RS:1;bfeb2336aed7:38781 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-13T13:35:36,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38781-0x1013467e0590002, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-13T13:35:36,247 INFO [RS:1;bfeb2336aed7:38781 {}] regionserver.HRegionServer(1031): Exiting; stopping=bfeb2336aed7,38781,1731504892507; zookeeper connection closed.
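The repeated "Cannot invoke "java.util.Map.values()" because "this.executors" is null" text is the JDK's helpful NullPointerException message: the HBase-Metrics2-1 thread is still sampling FsDatasetImpl after datanode shutdown has cleared the executor map, so the warning recurs on every metrics tick until the metrics system itself stops. A hedged sketch of that race and the defensive read that would avoid it (class and field names are illustrative, not the Hadoop source):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ExecutorService;

    // Illustrative only: a sampler reading a map that a concurrent shutdown may null out.
    public class VolumeMetricsSampler {
      private volatile Map<String, ExecutorService> executors = new ConcurrentHashMap<>();

      public int sampledExecutors() {
        Map<String, ExecutorService> snapshot = executors;  // read the field once
        if (snapshot == null) {
          return 0;                         // shutdown won the race; skip the sample instead of throwing
        }
        return snapshot.values().size();    // without the null check this is exactly the NPE in the log
      }

      public void shutdown() {
        executors = null;                   // the write the metrics thread keeps racing against
      }
    }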
2024-11-13T13:35:36,247 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2af6090d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2af6090d
2024-11-13T13:35:36,259 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds"
2024-11-13T13:35:36,266 DEBUG [RS:0;bfeb2336aed7:33523 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/oldWALs
2024-11-13T13:35:36,267 INFO [RS:0;bfeb2336aed7:33523 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bfeb2336aed7%2C33523%2C1731504891245.meta:.meta(num 1731504931059)
2024-11-13T13:35:36,268 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T13:35:36,268 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T13:35:36,268 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T13:35:36,269 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T13:35:36,269 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T13:35:36,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741893_1077 (size=14682)
2024-11-13T13:35:36,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741893_1077 (size=14682)
2024-11-13T13:35:36,276 DEBUG [RS:0;bfeb2336aed7:33523 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/oldWALs
2024-11-13T13:35:36,276 INFO [RS:0;bfeb2336aed7:33523 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bfeb2336aed7%2C33523%2C1731504891245:(num 1731504930736)
2024-11-13T13:35:36,276 DEBUG [RS:0;bfeb2336aed7:33523 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-13T13:35:36,277 INFO [RS:0;bfeb2336aed7:33523 {}] regionserver.LeaseManager(133): Closed leases
2024-11-13T13:35:36,277 INFO [RS:0;bfeb2336aed7:33523 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-13T13:35:36,277 INFO [RS:0;bfeb2336aed7:33523 {}] hbase.ChoreService(370): Chore service for: regionserver/bfeb2336aed7:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-11-13T13:35:36,277 INFO [RS:0;bfeb2336aed7:33523 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-13T13:35:36,277 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
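The ERROR above is AbstractFSWAL giving up after its bounded wait for the async writer to close, and the message itself names the knob: "hbase.wal.fshlog.wait.on.shutdown.seconds" (5 seconds in this run). Where a longer drain is genuinely needed, the key can be raised before the cluster starts; a sketch with an arbitrary example value:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalShutdownWait {
      public static void main(String[] args) {
        // Raise the WAL shutdown wait before starting the (mini)cluster; 30 is only an example.
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);
      }
    }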
2024-11-13T13:35:36,277 INFO [RS:0;bfeb2336aed7:33523 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33523
2024-11-13T13:35:36,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-13T13:35:36,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33523-0x1013467e0590001, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bfeb2336aed7,33523,1731504891245
2024-11-13T13:35:36,283 INFO [RS:0;bfeb2336aed7:33523 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-13T13:35:36,294 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bfeb2336aed7,33523,1731504891245]
2024-11-13T13:35:36,304 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/bfeb2336aed7,33523,1731504891245 already deleted, retry=false
2024-11-13T13:35:36,304 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; bfeb2336aed7,33523,1731504891245 expired; onlineServers=0
2024-11-13T13:35:36,304 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'bfeb2336aed7,39113,1731504891075' *****
2024-11-13T13:35:36,304 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-13T13:35:36,305 INFO [M:0;bfeb2336aed7:39113 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-13T13:35:36,305 INFO [M:0;bfeb2336aed7:39113 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-13T13:35:36,305 DEBUG [M:0;bfeb2336aed7:39113 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-13T13:35:36,305 DEBUG [M:0;bfeb2336aed7:39113 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-13T13:35:36,305 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-11-13T13:35:36,305 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.large.0-1731504891609 {}] cleaner.HFileCleaner(306): Exit Thread[master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.large.0-1731504891609,5,FailOnTimeoutGroup] 2024-11-13T13:35:36,306 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.small.0-1731504891610 {}] cleaner.HFileCleaner(306): Exit Thread[master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.small.0-1731504891610,5,FailOnTimeoutGroup] 2024-11-13T13:35:36,306 INFO [M:0;bfeb2336aed7:39113 {}] hbase.ChoreService(370): Chore service for: master/bfeb2336aed7:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T13:35:36,306 INFO [M:0;bfeb2336aed7:39113 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T13:35:36,307 DEBUG [M:0;bfeb2336aed7:39113 {}] master.HMaster(1795): Stopping service threads 2024-11-13T13:35:36,307 INFO [M:0;bfeb2336aed7:39113 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T13:35:36,307 INFO [M:0;bfeb2336aed7:39113 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T13:35:36,307 INFO [M:0;bfeb2336aed7:39113 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T13:35:36,308 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-13T13:35:36,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T13:35:36,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:35:36,315 DEBUG [M:0;bfeb2336aed7:39113 {}] zookeeper.ZKUtil(347): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-13T13:35:36,315 WARN [M:0;bfeb2336aed7:39113 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-13T13:35:36,316 INFO [M:0;bfeb2336aed7:39113 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/.lastflushedseqids 2024-11-13T13:35:36,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741905_1091 (size=130) 2024-11-13T13:35:36,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741905_1091 (size=130) 2024-11-13T13:35:36,328 INFO [M:0;bfeb2336aed7:39113 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T13:35:36,328 INFO [M:0;bfeb2336aed7:39113 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T13:35:36,328 DEBUG [M:0;bfeb2336aed7:39113 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T13:35:36,328 INFO [M:0;bfeb2336aed7:39113 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:35:36,329 DEBUG [M:0;bfeb2336aed7:39113 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:35:36,329 DEBUG [M:0;bfeb2336aed7:39113 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T13:35:36,329 DEBUG [M:0;bfeb2336aed7:39113 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:35:36,329 INFO [M:0;bfeb2336aed7:39113 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-13T13:35:36,346 DEBUG [M:0;bfeb2336aed7:39113 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b76ea90143c84eaba6820e9efff8573b is 82, key is hbase:meta,,1/info:regioninfo/1731504892292/Put/seqid=0 2024-11-13T13:35:36,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741906_1092 (size=5672) 2024-11-13T13:35:36,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741906_1092 (size=5672) 2024-11-13T13:35:36,351 INFO [M:0;bfeb2336aed7:39113 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b76ea90143c84eaba6820e9efff8573b 2024-11-13T13:35:36,370 DEBUG [M:0;bfeb2336aed7:39113 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b1189be9370c496e8b9e254c6550ad7b is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731504893040/Put/seqid=0 2024-11-13T13:35:36,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741907_1093 (size=6255) 2024-11-13T13:35:36,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741907_1093 (size=6255) 2024-11-13T13:35:36,375 INFO [M:0;bfeb2336aed7:39113 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b1189be9370c496e8b9e254c6550ad7b 2024-11-13T13:35:36,380 INFO [M:0;bfeb2336aed7:39113 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b1189be9370c496e8b9e254c6550ad7b 2024-11-13T13:35:36,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33523-0x1013467e0590001, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T13:35:36,394 INFO [RS:0;bfeb2336aed7:33523 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T13:35:36,394 DEBUG [Time-limited test-EventThread 
{}] zookeeper.ZKWatcher(609): regionserver:33523-0x1013467e0590001, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T13:35:36,394 INFO [RS:0;bfeb2336aed7:33523 {}] regionserver.HRegionServer(1031): Exiting; stopping=bfeb2336aed7,33523,1731504891245; zookeeper connection closed. 2024-11-13T13:35:36,394 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@28e31923 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@28e31923 2024-11-13T13:35:36,394 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-13T13:35:36,396 DEBUG [M:0;bfeb2336aed7:39113 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7afd3ffa80ad41c68b8148a09713e61a is 69, key is bfeb2336aed7,33523,1731504891245/rs:state/1731504891719/Put/seqid=0 2024-11-13T13:35:36,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741908_1094 (size=5224) 2024-11-13T13:35:36,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741908_1094 (size=5224) 2024-11-13T13:35:36,401 INFO [M:0;bfeb2336aed7:39113 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7afd3ffa80ad41c68b8148a09713e61a 2024-11-13T13:35:36,423 DEBUG [M:0;bfeb2336aed7:39113 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/785ca4c1c375480f8cc8074adf673945 is 52, key is load_balancer_on/state:d/1731504892489/Put/seqid=0 2024-11-13T13:35:36,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741909_1095 (size=5056) 2024-11-13T13:35:36,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741909_1095 (size=5056) 2024-11-13T13:35:36,429 INFO [M:0;bfeb2336aed7:39113 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/785ca4c1c375480f8cc8074adf673945 2024-11-13T13:35:36,434 DEBUG [M:0;bfeb2336aed7:39113 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b76ea90143c84eaba6820e9efff8573b as hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b76ea90143c84eaba6820e9efff8573b 2024-11-13T13:35:36,439 INFO [M:0;bfeb2336aed7:39113 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b76ea90143c84eaba6820e9efff8573b, entries=8, sequenceid=60, filesize=5.5 K 2024-11-13T13:35:36,440 DEBUG [M:0;bfeb2336aed7:39113 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b1189be9370c496e8b9e254c6550ad7b as hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b1189be9370c496e8b9e254c6550ad7b 2024-11-13T13:35:36,444 INFO [M:0;bfeb2336aed7:39113 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b1189be9370c496e8b9e254c6550ad7b 2024-11-13T13:35:36,445 INFO [M:0;bfeb2336aed7:39113 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b1189be9370c496e8b9e254c6550ad7b, entries=6, sequenceid=60, filesize=6.1 K 2024-11-13T13:35:36,445 DEBUG [M:0;bfeb2336aed7:39113 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7afd3ffa80ad41c68b8148a09713e61a as hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7afd3ffa80ad41c68b8148a09713e61a 2024-11-13T13:35:36,450 INFO [M:0;bfeb2336aed7:39113 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7afd3ffa80ad41c68b8148a09713e61a, entries=2, sequenceid=60, filesize=5.1 K 2024-11-13T13:35:36,451 DEBUG [M:0;bfeb2336aed7:39113 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/785ca4c1c375480f8cc8074adf673945 as hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/785ca4c1c375480f8cc8074adf673945 2024-11-13T13:35:36,455 INFO [M:0;bfeb2336aed7:39113 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/785ca4c1c375480f8cc8074adf673945, entries=1, sequenceid=60, filesize=4.9 K 2024-11-13T13:35:36,457 INFO [M:0;bfeb2336aed7:39113 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=60, compaction requested=false 2024-11-13T13:35:36,458 INFO [M:0;bfeb2336aed7:39113 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T13:35:36,458 DEBUG [M:0;bfeb2336aed7:39113 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731504936328Disabling compacts and flushes for region at 1731504936328Disabling writes for close at 1731504936329 (+1 ms)Obtaining lock to block concurrent updates at 1731504936329Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731504936329Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1731504936330 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731504936331 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731504936331Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731504936346 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731504936346Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731504936356 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731504936369 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731504936369Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731504936380 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731504936396 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731504936396Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731504936407 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731504936422 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731504936422Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@60c6af73: reopening flushed file at 1731504936433 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@75b58cc4: reopening flushed file at 1731504936439 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@726e54ec: reopening flushed file at 1731504936445 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@444032a6: reopening flushed file at 1731504936450 (+5 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=60, compaction requested=false at 1731504936457 (+7 ms)Writing region close event to WAL at 1731504936458 (+1 ms)Closed at 1731504936458 2024-11-13T13:35:36,459 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:36,459 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:36,459 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:36,459 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:36,459 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:36,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741891_1074 (size=1045) 2024-11-13T13:35:36,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741891_1074 (size=1045) 2024-11-13T13:35:36,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33931 is added to blk_1073741825_1001 (size=7) 2024-11-13T13:35:36,660 WARN 
[HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T13:35:36,689 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:35:36,690 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:35:36,690 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:35:36,691 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:35:36,691 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:35:36,694 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:35:36,695 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:35:36,697 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:35:37,070 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:37,075 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:37,507 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2a06b517 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1828668861-172.17.0.2-1731504888871:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:35467,null,null]) java.net.ConnectException: Call From bfeb2336aed7/172.17.0.2 to localhost:38563 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-11-13T13:35:37,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741838_1020 (size=2431) 2024-11-13T13:35:37,645 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/WALs/bfeb2336aed7,39113,1731504891075/bfeb2336aed7%2C39113%2C1731504891075.1731504891382 to hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/oldWALs/bfeb2336aed7%2C39113%2C1731504891075.1731504891382 2024-11-13T13:35:37,654 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/MasterData/oldWALs/bfeb2336aed7%2C39113%2C1731504891075.1731504891382 to hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/oldWALs/bfeb2336aed7%2C39113%2C1731504891075.1731504891382$masterlocalwal$ 2024-11-13T13:35:37,654 INFO [M:0;bfeb2336aed7:39113 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-13T13:35:37,654 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T13:35:37,655 INFO [M:0;bfeb2336aed7:39113 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39113 2024-11-13T13:35:37,655 INFO [M:0;bfeb2336aed7:39113 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T13:35:37,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T13:35:37,823 INFO [M:0;bfeb2336aed7:39113 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T13:35:37,824 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39113-0x1013467e0590000, quorum=127.0.0.1:56840, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T13:35:37,829 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@561386dd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:35:37,830 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c8ca2dc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T13:35:37,830 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T13:35:37,830 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1542e930{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T13:35:37,830 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6842affb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/hadoop.log.dir/,STOPPED} 2024-11-13T13:35:37,833 WARN [BP-1828668861-172.17.0.2-1731504888871 heartbeating to localhost/127.0.0.1:40141 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T13:35:37,833 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered 
interrupt and exit. 2024-11-13T13:35:37,833 WARN [BP-1828668861-172.17.0.2-1731504888871 heartbeating to localhost/127.0.0.1:40141 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1828668861-172.17.0.2-1731504888871 (Datanode Uuid e06bd827-5889-475e-8fba-56aa016f4c7b) service to localhost/127.0.0.1:40141 2024-11-13T13:35:37,833 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T13:35:37,832 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@52ace095 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1828668861-172.17.0.2-1731504888871:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:35467,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:38563 , LocalHost:localPort bfeb2336aed7/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-13T13:35:37,833 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@52ace095 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1828668861-172.17.0.2-1731504888871:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:33149,null,null], DatanodeInfoWithStorage[127.0.0.1:35467,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-1828668861-172.17.0.2-1731504888871 2024-11-13T13:35:37,834 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@52ace095 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1828668861-172.17.0.2-1731504888871:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:35467,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1828668861-172.17.0.2-1731504888871 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
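The InterruptedIOException above spells out the retry policy in force, RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS): the block-recovery worker was interrupted while still retrying a datanode that had already gone away. For reference, that policy corresponds to Hadoop's RetryPolicies factory; how the IPC client wires it up is not shown here:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;

    public class RecoveryRetryPolicy {
      public static void main(String[] args) {
        // The same fixed-sleep policy the IPC client reports: 10 retries, 1000 ms apart.
        RetryPolicy policy =
            RetryPolicies.retryUpToMaximumCountWithFixedSleep(10, 1000, TimeUnit.MILLISECONDS);
        System.out.println(policy);
      }
    }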
2024-11-13T13:35:37,834 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@52ace095 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1828668861-172.17.0.2-1731504888871:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:33149,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1828668861-172.17.0.2-1731504888871 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:37,834 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data3/current/BP-1828668861-172.17.0.2-1731504888871 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:35:37,834 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@52ace095 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1828668861-172.17.0.2-1731504888871:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:35467,null,null], DatanodeInfoWithStorage[127.0.0.1:33149,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1828668861-172.17.0.2-1731504888871:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:35467,null,null], DatanodeInfoWithStorage[127.0.0.1:33149,null,null]] 2024-11-13T13:35:37,835 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data4/current/BP-1828668861-172.17.0.2-1731504888871 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:35:37,835 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T13:35:37,837 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@15010086{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:35:37,837 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7ab1ed71{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T13:35:37,837 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T13:35:37,837 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5c0b8b07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T13:35:37,837 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@2137ebee{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/hadoop.log.dir/,STOPPED} 2024-11-13T13:35:37,839 WARN [BP-1828668861-172.17.0.2-1731504888871 heartbeating to localhost/127.0.0.1:40141 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T13:35:37,839 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-13T13:35:37,839 WARN [BP-1828668861-172.17.0.2-1731504888871 heartbeating to localhost/127.0.0.1:40141 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1828668861-172.17.0.2-1731504888871 (Datanode Uuid 8f99e9e2-208b-40da-abea-6371320ca5d9) service to localhost/127.0.0.1:40141 2024-11-13T13:35:37,839 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T13:35:37,839 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data5/current/BP-1828668861-172.17.0.2-1731504888871 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:35:37,840 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/cluster_4b5f2985-1792-e678-40f5-a273020dd5a5/data/data6/current/BP-1828668861-172.17.0.2-1731504888871 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:35:37,840 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T13:35:37,846 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7eee535{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T13:35:37,846 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@589d0492{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T13:35:37,846 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T13:35:37,846 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@38184680{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T13:35:37,847 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8f6e525{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/hadoop.log.dir/,STOPPED} 2024-11-13T13:35:37,854 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-13T13:35:37,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-13T13:35:37,891 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: 
regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=158 (was 83) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40141 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:40141 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Command processor java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:40141 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40141 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40141 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40141 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40141 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40141 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46753 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40141 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f60bcbf4928.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:46753 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40141 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) 
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f60bcbf4928.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40141 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=448 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=215 (was 206) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=4002 (was 4200) 2024-11-13T13:35:37,898 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=158, OpenFileDescriptor=448, MaxFileDescriptor=1048576, SystemLoadAverage=215, ProcessCount=11, AvailableMemoryMB=4002 2024-11-13T13:35:37,898 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-13T13:35:37,898 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/hadoop.log.dir so I do NOT create it in target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f 2024-11-13T13:35:37,898 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/210997d4-e94b-09b4-185f-d9911e45a816/hadoop.tmp.dir so I do NOT create it in target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f 2024-11-13T13:35:37,898 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/cluster_7d13f452-3b8c-c473-f020-215ab3c51fcd, deleteOnExit=true 2024-11-13T13:35:37,898 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-13T13:35:37,898 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/test.cache.data in system properties and HBase conf 2024-11-13T13:35:37,898 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T13:35:37,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/hadoop.log.dir in system properties and HBase conf 2024-11-13T13:35:37,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T13:35:37,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T13:35:37,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T13:35:37,899 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-13T13:35:37,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T13:35:37,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T13:35:37,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T13:35:37,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T13:35:37,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T13:35:37,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T13:35:37,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T13:35:37,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T13:35:37,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-13T13:35:37,900 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/nfs.dump.dir in system properties and HBase conf 2024-11-13T13:35:37,900 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/java.io.tmpdir in system properties and HBase conf 2024-11-13T13:35:37,900 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T13:35:37,900 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T13:35:37,900 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T13:35:37,912 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T13:35:38,071 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:38,076 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:38,291 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:35:38,295 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T13:35:38,296 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T13:35:38,296 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T13:35:38,296 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T13:35:38,296 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:35:38,297 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25a29a07{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/hadoop.log.dir/,AVAILABLE} 2024-11-13T13:35:38,297 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7096145a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T13:35:38,397 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@49ef22be{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/java.io.tmpdir/jetty-localhost-32911-hadoop-hdfs-3_4_1-tests_jar-_-any-13002753741580498361/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T13:35:38,398 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1b230242{HTTP/1.1, (http/1.1)}{localhost:32911} 2024-11-13T13:35:38,398 INFO [Time-limited test {}] server.Server(415): Started @153085ms 2024-11-13T13:35:38,409 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T13:35:38,716 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:35:38,720 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T13:35:38,720 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T13:35:38,721 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T13:35:38,721 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T13:35:38,721 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1fc8bed8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/hadoop.log.dir/,AVAILABLE} 2024-11-13T13:35:38,722 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@13646a74{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T13:35:38,819 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5fb42f9a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/java.io.tmpdir/jetty-localhost-40309-hadoop-hdfs-3_4_1-tests_jar-_-any-16217357714967723788/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:35:38,819 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6d74671d{HTTP/1.1, (http/1.1)}{localhost:40309} 2024-11-13T13:35:38,820 INFO [Time-limited test {}] server.Server(415): Started @153508ms 2024-11-13T13:35:38,822 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T13:35:38,857 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:35:38,860 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T13:35:38,861 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T13:35:38,861 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T13:35:38,861 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T13:35:38,862 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64434c96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/hadoop.log.dir/,AVAILABLE} 2024-11-13T13:35:38,862 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47946b20{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T13:35:38,955 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@37687418{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/java.io.tmpdir/jetty-localhost-33561-hadoop-hdfs-3_4_1-tests_jar-_-any-5204757067383541863/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:35:38,956 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e7f214b{HTTP/1.1, (http/1.1)}{localhost:33561} 2024-11-13T13:35:38,956 INFO [Time-limited test {}] server.Server(415): Started @153643ms 2024-11-13T13:35:38,957 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T13:35:39,072 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:39,077 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:35:40,074 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:40,078 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:40,253 WARN [Thread-1202 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/cluster_7d13f452-3b8c-c473-f020-215ab3c51fcd/data/data1/current/BP-1348924368-172.17.0.2-1731504937923/current, will proceed with Du for space computation calculation, 2024-11-13T13:35:40,253 WARN [Thread-1203 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/cluster_7d13f452-3b8c-c473-f020-215ab3c51fcd/data/data2/current/BP-1348924368-172.17.0.2-1731504937923/current, will proceed with Du for space computation calculation, 2024-11-13T13:35:40,273 WARN [Thread-1166 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T13:35:40,276 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfb98010de728ce13 with lease ID 0x2b905e81678a495b: Processing first storage report for DS-ab9a2161-65c5-434f-8fcb-2eae2752a222 from datanode DatanodeRegistration(127.0.0.1:36967, datanodeUuid=0cea774c-18a7-48aa-896e-30dea8c1d062, infoPort=43589, infoSecurePort=0, ipcPort=44447, storageInfo=lv=-57;cid=testClusterID;nsid=1427826847;c=1731504937923) 2024-11-13T13:35:40,276 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfb98010de728ce13 with lease ID 0x2b905e81678a495b: from storage DS-ab9a2161-65c5-434f-8fcb-2eae2752a222 node DatanodeRegistration(127.0.0.1:36967, datanodeUuid=0cea774c-18a7-48aa-896e-30dea8c1d062, infoPort=43589, infoSecurePort=0, ipcPort=44447, storageInfo=lv=-57;cid=testClusterID;nsid=1427826847;c=1731504937923), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:35:40,276 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfb98010de728ce13 with lease ID 0x2b905e81678a495b: Processing first storage report for DS-657cc954-4f23-4958-bcf2-ac3127d0b3fb from datanode DatanodeRegistration(127.0.0.1:36967, datanodeUuid=0cea774c-18a7-48aa-896e-30dea8c1d062, infoPort=43589, infoSecurePort=0, ipcPort=44447, storageInfo=lv=-57;cid=testClusterID;nsid=1427826847;c=1731504937923) 2024-11-13T13:35:40,276 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfb98010de728ce13 with lease ID 0x2b905e81678a495b: from storage DS-657cc954-4f23-4958-bcf2-ac3127d0b3fb node DatanodeRegistration(127.0.0.1:36967, datanodeUuid=0cea774c-18a7-48aa-896e-30dea8c1d062, infoPort=43589, infoSecurePort=0, ipcPort=44447, storageInfo=lv=-57;cid=testClusterID;nsid=1427826847;c=1731504937923), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:35:40,393 WARN [Thread-1213 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/cluster_7d13f452-3b8c-c473-f020-215ab3c51fcd/data/data3/current/BP-1348924368-172.17.0.2-1731504937923/current, will proceed with Du for space computation calculation, 2024-11-13T13:35:40,393 WARN [Thread-1214 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/cluster_7d13f452-3b8c-c473-f020-215ab3c51fcd/data/data4/current/BP-1348924368-172.17.0.2-1731504937923/current, will proceed with Du for space computation calculation, 2024-11-13T13:35:40,413 WARN [Thread-1189 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T13:35:40,415 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1258c8f774dfb51b with lease ID 0x2b905e81678a495c: Processing first storage report for DS-6e6cf97b-09b5-430b-92bc-37ad8ab6214f from datanode DatanodeRegistration(127.0.0.1:42613, datanodeUuid=3a717171-4bca-4788-8f6a-d577ec78ab7f, infoPort=45975, infoSecurePort=0, ipcPort=40743, storageInfo=lv=-57;cid=testClusterID;nsid=1427826847;c=1731504937923) 2024-11-13T13:35:40,415 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1258c8f774dfb51b with lease ID 0x2b905e81678a495c: from storage DS-6e6cf97b-09b5-430b-92bc-37ad8ab6214f node DatanodeRegistration(127.0.0.1:42613, datanodeUuid=3a717171-4bca-4788-8f6a-d577ec78ab7f, infoPort=45975, infoSecurePort=0, ipcPort=40743, storageInfo=lv=-57;cid=testClusterID;nsid=1427826847;c=1731504937923), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:35:40,415 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1258c8f774dfb51b with lease ID 0x2b905e81678a495c: Processing first storage report for DS-0c2e36f5-bdf3-40e8-91b1-b4a686944100 from datanode DatanodeRegistration(127.0.0.1:42613, datanodeUuid=3a717171-4bca-4788-8f6a-d577ec78ab7f, infoPort=45975, infoSecurePort=0, ipcPort=40743, storageInfo=lv=-57;cid=testClusterID;nsid=1427826847;c=1731504937923) 2024-11-13T13:35:40,415 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1258c8f774dfb51b with lease ID 0x2b905e81678a495c: from storage DS-0c2e36f5-bdf3-40e8-91b1-b4a686944100 node DatanodeRegistration(127.0.0.1:42613, datanodeUuid=3a717171-4bca-4788-8f6a-d577ec78ab7f, infoPort=45975, infoSecurePort=0, ipcPort=40743, storageInfo=lv=-57;cid=testClusterID;nsid=1427826847;c=1731504937923), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-13T13:35:40,500 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f 2024-11-13T13:35:40,503 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/cluster_7d13f452-3b8c-c473-f020-215ab3c51fcd/zookeeper_0, clientPort=53902, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/cluster_7d13f452-3b8c-c473-f020-215ab3c51fcd/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/cluster_7d13f452-3b8c-c473-f020-215ab3c51fcd/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T13:35:40,504 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53902 2024-11-13T13:35:40,504 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:35:40,505 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:35:40,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741825_1001 (size=7) 2024-11-13T13:35:40,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36967 is added to blk_1073741825_1001 (size=7) 2024-11-13T13:35:40,516 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2 with version=8 2024-11-13T13:35:40,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/hbase-staging 2024-11-13T13:35:40,518 INFO [Time-limited test {}] client.ConnectionUtils(128): master/bfeb2336aed7:0 server-side Connection retries=45 2024-11-13T13:35:40,518 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T13:35:40,518 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T13:35:40,518 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T13:35:40,518 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T13:35:40,518 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T13:35:40,518 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T13:35:40,519 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T13:35:40,519 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36873 2024-11-13T13:35:40,521 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36873 connecting to ZooKeeper ensemble=127.0.0.1:53902 2024-11-13T13:35:40,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:368730x0, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T13:35:40,674 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36873-0x1013468a1810000 connected 2024-11-13T13:35:40,759 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:35:40,760 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:35:40,762 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T13:35:40,762 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2, hbase.cluster.distributed=false 2024-11-13T13:35:40,764 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T13:35:40,764 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36873 2024-11-13T13:35:40,765 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36873 2024-11-13T13:35:40,765 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36873 2024-11-13T13:35:40,765 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36873 2024-11-13T13:35:40,765 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36873 2024-11-13T13:35:40,789 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/bfeb2336aed7:0 server-side Connection retries=45 2024-11-13T13:35:40,789 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T13:35:40,790 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T13:35:40,790 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T13:35:40,790 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T13:35:40,790 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T13:35:40,790 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T13:35:40,790 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T13:35:40,791 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45505 2024-11-13T13:35:40,793 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45505 connecting to ZooKeeper ensemble=127.0.0.1:53902 2024-11-13T13:35:40,794 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:35:40,796 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:35:40,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:455050x0, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T13:35:40,812 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:455050x0, quorum=127.0.0.1:53902, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T13:35:40,812 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45505-0x1013468a1810001 connected 2024-11-13T13:35:40,812 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T13:35:40,813 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T13:35:40,814 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45505-0x1013468a1810001, quorum=127.0.0.1:53902, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T13:35:40,815 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45505-0x1013468a1810001, quorum=127.0.0.1:53902, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T13:35:40,818 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45505 2024-11-13T13:35:40,820 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45505 2024-11-13T13:35:40,820 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45505 2024-11-13T13:35:40,821 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45505 2024-11-13T13:35:40,821 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45505 2024-11-13T13:35:40,833 DEBUG [M:0;bfeb2336aed7:36873 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;bfeb2336aed7:36873 2024-11-13T13:35:40,833 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/bfeb2336aed7,36873,1731504940518 2024-11-13T13:35:40,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x1013468a1810001, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T13:35:40,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T13:35:40,843 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/bfeb2336aed7,36873,1731504940518 2024-11-13T13:35:40,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x1013468a1810001, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T13:35:40,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:35:40,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x1013468a1810001, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:35:40,854 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T13:35:40,854 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/bfeb2336aed7,36873,1731504940518 from backup master directory 2024-11-13T13:35:40,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/bfeb2336aed7,36873,1731504940518 2024-11-13T13:35:40,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x1013468a1810001, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T13:35:40,864 WARN [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-13T13:35:40,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T13:35:40,864 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=bfeb2336aed7,36873,1731504940518 2024-11-13T13:35:40,868 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/hbase.id] with ID: 7b9a6952-257e-4e9e-a082-65fa869e767b 2024-11-13T13:35:40,868 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/.tmp/hbase.id 2024-11-13T13:35:40,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741826_1002 (size=42) 2024-11-13T13:35:40,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36967 is added to blk_1073741826_1002 (size=42) 2024-11-13T13:35:40,875 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/.tmp/hbase.id]:[hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/hbase.id] 2024-11-13T13:35:40,888 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:35:40,888 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-13T13:35:40,889 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-13T13:35:40,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x1013468a1810001, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:35:40,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:35:40,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741827_1003 (size=196) 2024-11-13T13:35:40,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36967 is added to blk_1073741827_1003 (size=196) 2024-11-13T13:35:40,911 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T13:35:40,912 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T13:35:40,912 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T13:35:40,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741828_1004 (size=1189) 2024-11-13T13:35:40,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36967 is added to blk_1073741828_1004 (size=1189) 2024-11-13T13:35:40,922 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/data/master/store 2024-11-13T13:35:40,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36967 is added to blk_1073741829_1005 (size=34) 2024-11-13T13:35:40,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741829_1005 (size=34) 2024-11-13T13:35:40,931 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:35:40,932 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T13:35:40,932 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:35:40,932 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:35:40,932 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T13:35:40,932 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:35:40,932 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T13:35:40,932 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731504940932Disabling compacts and flushes for region at 1731504940932Disabling writes for close at 1731504940932Writing region close event to WAL at 1731504940932Closed at 1731504940932 2024-11-13T13:35:40,933 WARN [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/data/master/store/.initializing 2024-11-13T13:35:40,933 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/WALs/bfeb2336aed7,36873,1731504940518 2024-11-13T13:35:40,936 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bfeb2336aed7%2C36873%2C1731504940518, suffix=, logDir=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/WALs/bfeb2336aed7,36873,1731504940518, archiveDir=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/oldWALs, maxLogs=10 2024-11-13T13:35:40,937 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C36873%2C1731504940518.1731504940937 2024-11-13T13:35:40,945 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/WALs/bfeb2336aed7,36873,1731504940518/bfeb2336aed7%2C36873%2C1731504940518.1731504940937 2024-11-13T13:35:40,951 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43589:43589),(127.0.0.1/127.0.0.1:45975:45975)] 2024-11-13T13:35:40,952 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T13:35:40,952 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:35:40,952 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:35:40,952 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:35:40,954 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:35:40,956 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T13:35:40,956 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:35:40,957 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:35:40,957 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:35:40,959 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T13:35:40,959 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:35:40,959 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T13:35:40,959 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:35:40,961 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T13:35:40,961 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:35:40,962 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T13:35:40,962 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:35:40,963 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T13:35:40,963 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:35:40,964 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T13:35:40,964 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:35:40,965 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:35:40,966 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:35:40,967 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:35:40,967 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:35:40,968 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T13:35:40,969 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:35:40,972 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T13:35:40,972 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=705068, jitterRate=-0.10346013307571411}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T13:35:40,973 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731504940952Initializing all the Stores at 1731504940954 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504940954Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731504940954Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731504940954Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731504940954Cleaning up temporary data from old regions at 1731504940967 (+13 ms)Region opened successfully at 1731504940973 (+6 ms) 2024-11-13T13:35:40,973 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T13:35:40,976 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e74137a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bfeb2336aed7/172.17.0.2:0 2024-11-13T13:35:40,977 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-13T13:35:40,978 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T13:35:40,978 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T13:35:40,978 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T13:35:40,978 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-13T13:35:40,979 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-13T13:35:40,979 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T13:35:40,982 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-13T13:35:40,983 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T13:35:40,990 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T13:35:40,991 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T13:35:40,992 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T13:35:41,001 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T13:35:41,001 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T13:35:41,003 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T13:35:41,011 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T13:35:41,013 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T13:35:41,022 DEBUG 
[master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T13:35:41,025 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T13:35:41,032 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T13:35:41,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T13:35:41,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x1013468a1810001, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T13:35:41,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x1013468a1810001, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:35:41,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:35:41,044 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=bfeb2336aed7,36873,1731504940518, sessionid=0x1013468a1810000, setting cluster-up flag (Was=false) 2024-11-13T13:35:41,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x1013468a1810001, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:35:41,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:35:41,074 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:41,078 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:35:41,095 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T13:35:41,097 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bfeb2336aed7,36873,1731504940518 2024-11-13T13:35:41,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:35:41,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x1013468a1810001, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:35:41,148 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T13:35:41,150 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bfeb2336aed7,36873,1731504940518 2024-11-13T13:35:41,151 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T13:35:41,153 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T13:35:41,153 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T13:35:41,153 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-13T13:35:41,153 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: bfeb2336aed7,36873,1731504940518 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T13:35:41,155 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/bfeb2336aed7:0, corePoolSize=5, maxPoolSize=5 2024-11-13T13:35:41,155 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/bfeb2336aed7:0, corePoolSize=5, maxPoolSize=5 2024-11-13T13:35:41,155 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=5, maxPoolSize=5 2024-11-13T13:35:41,155 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=5, maxPoolSize=5 2024-11-13T13:35:41,155 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/bfeb2336aed7:0, corePoolSize=10, maxPoolSize=10 2024-11-13T13:35:41,155 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:35:41,155 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=2, maxPoolSize=2 2024-11-13T13:35:41,155 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:35:41,156 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731504971156 2024-11-13T13:35:41,156 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T13:35:41,156 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T13:35:41,156 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T13:35:41,156 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T13:35:41,157 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T13:35:41,157 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T13:35:41,157 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T13:35:41,157 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T13:35:41,157 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T13:35:41,157 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T13:35:41,157 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T13:35:41,157 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T13:35:41,158 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T13:35:41,158 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T13:35:41,158 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.large.0-1731504941158,5,FailOnTimeoutGroup] 2024-11-13T13:35:41,158 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.small.0-1731504941158,5,FailOnTimeoutGroup] 2024-11-13T13:35:41,158 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T13:35:41,158 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-13T13:35:41,158 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T13:35:41,158 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-13T13:35:41,158 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:35:41,158 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T13:35:41,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741831_1007 (size=1321) 2024-11-13T13:35:41,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36967 is added to blk_1073741831_1007 (size=1321) 2024-11-13T13:35:41,164 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T13:35:41,165 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2 2024-11-13T13:35:41,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36967 is added to blk_1073741832_1008 (size=32) 2024-11-13T13:35:41,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741832_1008 (size=32) 2024-11-13T13:35:41,171 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:35:41,172 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T13:35:41,174 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T13:35:41,174 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:35:41,174 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:35:41,174 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T13:35:41,176 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T13:35:41,176 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:35:41,176 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:35:41,176 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T13:35:41,178 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T13:35:41,178 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:35:41,178 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:35:41,179 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T13:35:41,180 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T13:35:41,180 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:35:41,180 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:35:41,181 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T13:35:41,181 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/hbase/meta/1588230740 2024-11-13T13:35:41,182 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/hbase/meta/1588230740 2024-11-13T13:35:41,183 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T13:35:41,184 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T13:35:41,184 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T13:35:41,186 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T13:35:41,188 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T13:35:41,189 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=694807, jitterRate=-0.11650760471820831}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T13:35:41,190 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731504941171Initializing all the Stores at 1731504941172 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504941172Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504941172Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731504941172Instantiating store for column family {NAME => 'table', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504941172Cleaning up temporary data from old regions at 1731504941184 (+12 ms)Region opened successfully at 1731504941190 (+6 ms) 2024-11-13T13:35:41,190 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T13:35:41,190 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T13:35:41,190 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T13:35:41,190 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T13:35:41,190 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T13:35:41,190 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T13:35:41,190 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731504941190Disabling compacts and flushes for region at 1731504941190Disabling writes for close at 1731504941190Writing region close event to WAL at 1731504941190Closed at 1731504941190 2024-11-13T13:35:41,192 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T13:35:41,192 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T13:35:41,192 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T13:35:41,194 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T13:35:41,195 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T13:35:41,226 INFO [RS:0;bfeb2336aed7:45505 {}] regionserver.HRegionServer(746): ClusterId : 7b9a6952-257e-4e9e-a082-65fa869e767b 2024-11-13T13:35:41,226 DEBUG [RS:0;bfeb2336aed7:45505 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T13:35:41,234 DEBUG [RS:0;bfeb2336aed7:45505 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T13:35:41,234 DEBUG [RS:0;bfeb2336aed7:45505 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T13:35:41,244 DEBUG [RS:0;bfeb2336aed7:45505 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T13:35:41,245 DEBUG [RS:0;bfeb2336aed7:45505 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cc9eb33, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bfeb2336aed7/172.17.0.2:0 2024-11-13T13:35:41,262 DEBUG [RS:0;bfeb2336aed7:45505 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;bfeb2336aed7:45505 2024-11-13T13:35:41,262 INFO [RS:0;bfeb2336aed7:45505 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T13:35:41,262 INFO [RS:0;bfeb2336aed7:45505 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T13:35:41,262 DEBUG [RS:0;bfeb2336aed7:45505 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-13T13:35:41,262 INFO [RS:0;bfeb2336aed7:45505 {}] regionserver.HRegionServer(2659): reportForDuty to master=bfeb2336aed7,36873,1731504940518 with port=45505, startcode=1731504940789 2024-11-13T13:35:41,262 DEBUG [RS:0;bfeb2336aed7:45505 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T13:35:41,264 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50235, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T13:35:41,265 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36873 {}] master.ServerManager(363): Checking decommissioned status of RegionServer bfeb2336aed7,45505,1731504940789 2024-11-13T13:35:41,265 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36873 {}] master.ServerManager(517): Registering regionserver=bfeb2336aed7,45505,1731504940789 2024-11-13T13:35:41,266 DEBUG [RS:0;bfeb2336aed7:45505 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2 2024-11-13T13:35:41,267 DEBUG [RS:0;bfeb2336aed7:45505 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46467 2024-11-13T13:35:41,267 DEBUG [RS:0;bfeb2336aed7:45505 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T13:35:41,274 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T13:35:41,275 DEBUG [RS:0;bfeb2336aed7:45505 {}] zookeeper.ZKUtil(111): regionserver:45505-0x1013468a1810001, quorum=127.0.0.1:53902, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bfeb2336aed7,45505,1731504940789 2024-11-13T13:35:41,275 WARN [RS:0;bfeb2336aed7:45505 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-13T13:35:41,275 INFO [RS:0;bfeb2336aed7:45505 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T13:35:41,275 DEBUG [RS:0;bfeb2336aed7:45505 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789 2024-11-13T13:35:41,275 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bfeb2336aed7,45505,1731504940789] 2024-11-13T13:35:41,279 INFO [RS:0;bfeb2336aed7:45505 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T13:35:41,280 INFO [RS:0;bfeb2336aed7:45505 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T13:35:41,281 INFO [RS:0;bfeb2336aed7:45505 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T13:35:41,281 INFO [RS:0;bfeb2336aed7:45505 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T13:35:41,281 INFO [RS:0;bfeb2336aed7:45505 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T13:35:41,282 INFO [RS:0;bfeb2336aed7:45505 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T13:35:41,282 INFO [RS:0;bfeb2336aed7:45505 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T13:35:41,282 DEBUG [RS:0;bfeb2336aed7:45505 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:35:41,282 DEBUG [RS:0;bfeb2336aed7:45505 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:35:41,282 DEBUG [RS:0;bfeb2336aed7:45505 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:35:41,282 DEBUG [RS:0;bfeb2336aed7:45505 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:35:41,282 DEBUG [RS:0;bfeb2336aed7:45505 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:35:41,282 DEBUG [RS:0;bfeb2336aed7:45505 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bfeb2336aed7:0, corePoolSize=2, maxPoolSize=2 2024-11-13T13:35:41,282 DEBUG [RS:0;bfeb2336aed7:45505 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:35:41,282 DEBUG [RS:0;bfeb2336aed7:45505 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:35:41,282 DEBUG [RS:0;bfeb2336aed7:45505 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bfeb2336aed7:0, corePoolSize=1, 
maxPoolSize=1 2024-11-13T13:35:41,282 DEBUG [RS:0;bfeb2336aed7:45505 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:35:41,282 DEBUG [RS:0;bfeb2336aed7:45505 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:35:41,283 DEBUG [RS:0;bfeb2336aed7:45505 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:35:41,283 DEBUG [RS:0;bfeb2336aed7:45505 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bfeb2336aed7:0, corePoolSize=3, maxPoolSize=3 2024-11-13T13:35:41,283 DEBUG [RS:0;bfeb2336aed7:45505 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0, corePoolSize=3, maxPoolSize=3 2024-11-13T13:35:41,284 INFO [RS:0;bfeb2336aed7:45505 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T13:35:41,284 INFO [RS:0;bfeb2336aed7:45505 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T13:35:41,284 INFO [RS:0;bfeb2336aed7:45505 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T13:35:41,284 INFO [RS:0;bfeb2336aed7:45505 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T13:35:41,284 INFO [RS:0;bfeb2336aed7:45505 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T13:35:41,284 INFO [RS:0;bfeb2336aed7:45505 {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,45505,1731504940789-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T13:35:41,298 INFO [RS:0;bfeb2336aed7:45505 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T13:35:41,298 INFO [RS:0;bfeb2336aed7:45505 {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,45505,1731504940789-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T13:35:41,298 INFO [RS:0;bfeb2336aed7:45505 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:35:41,299 INFO [RS:0;bfeb2336aed7:45505 {}] regionserver.Replication(171): bfeb2336aed7,45505,1731504940789 started 2024-11-13T13:35:41,311 INFO [RS:0;bfeb2336aed7:45505 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T13:35:41,311 INFO [RS:0;bfeb2336aed7:45505 {}] regionserver.HRegionServer(1482): Serving as bfeb2336aed7,45505,1731504940789, RpcServer on bfeb2336aed7/172.17.0.2:45505, sessionid=0x1013468a1810001 2024-11-13T13:35:41,311 DEBUG [RS:0;bfeb2336aed7:45505 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T13:35:41,311 DEBUG [RS:0;bfeb2336aed7:45505 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bfeb2336aed7,45505,1731504940789 2024-11-13T13:35:41,311 DEBUG [RS:0;bfeb2336aed7:45505 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bfeb2336aed7,45505,1731504940789' 2024-11-13T13:35:41,311 DEBUG [RS:0;bfeb2336aed7:45505 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T13:35:41,312 DEBUG [RS:0;bfeb2336aed7:45505 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T13:35:41,312 DEBUG [RS:0;bfeb2336aed7:45505 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T13:35:41,312 DEBUG [RS:0;bfeb2336aed7:45505 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T13:35:41,312 DEBUG [RS:0;bfeb2336aed7:45505 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager bfeb2336aed7,45505,1731504940789 2024-11-13T13:35:41,312 DEBUG [RS:0;bfeb2336aed7:45505 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bfeb2336aed7,45505,1731504940789' 2024-11-13T13:35:41,312 DEBUG [RS:0;bfeb2336aed7:45505 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T13:35:41,312 DEBUG [RS:0;bfeb2336aed7:45505 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T13:35:41,313 DEBUG [RS:0;bfeb2336aed7:45505 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T13:35:41,313 INFO [RS:0;bfeb2336aed7:45505 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T13:35:41,313 INFO [RS:0;bfeb2336aed7:45505 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-13T13:35:41,346 WARN [bfeb2336aed7:36873 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-13T13:35:41,417 INFO [RS:0;bfeb2336aed7:45505 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bfeb2336aed7%2C45505%2C1731504940789, suffix=, logDir=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789, archiveDir=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/oldWALs, maxLogs=32 2024-11-13T13:35:41,418 INFO [RS:0;bfeb2336aed7:45505 {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C45505%2C1731504940789.1731504941418 2024-11-13T13:35:41,427 INFO [RS:0;bfeb2336aed7:45505 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504941418 2024-11-13T13:35:41,430 DEBUG [RS:0;bfeb2336aed7:45505 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43589:43589),(127.0.0.1/127.0.0.1:45975:45975)] 2024-11-13T13:35:41,596 DEBUG [bfeb2336aed7:36873 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-13T13:35:41,597 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=bfeb2336aed7,45505,1731504940789 2024-11-13T13:35:41,598 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bfeb2336aed7,45505,1731504940789, state=OPENING 2024-11-13T13:35:41,611 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T13:35:41,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x1013468a1810001, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:35:41,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:35:41,623 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T13:35:41,623 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T13:35:41,623 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T13:35:41,623 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=bfeb2336aed7,45505,1731504940789}] 2024-11-13T13:35:41,779 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T13:35:41,781 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41763, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T13:35:41,784 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T13:35:41,784 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T13:35:41,786 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bfeb2336aed7%2C45505%2C1731504940789.meta, suffix=.meta, logDir=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789, archiveDir=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/oldWALs, maxLogs=32 2024-11-13T13:35:41,787 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C45505%2C1731504940789.meta.1731504941786.meta 2024-11-13T13:35:41,791 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.meta.1731504941786.meta 2024-11-13T13:35:41,795 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45975:45975),(127.0.0.1/127.0.0.1:43589:43589)] 2024-11-13T13:35:41,796 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T13:35:41,796 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T13:35:41,796 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T13:35:41,797 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-13T13:35:41,797 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T13:35:41,797 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:35:41,797 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T13:35:41,797 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T13:35:41,798 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T13:35:41,799 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T13:35:41,799 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:35:41,799 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:35:41,799 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T13:35:41,800 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T13:35:41,800 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:35:41,800 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:35:41,801 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T13:35:41,801 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T13:35:41,801 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:35:41,802 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:35:41,802 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T13:35:41,802 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T13:35:41,803 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:35:41,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-13T13:35:41,803 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T13:35:41,804 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/hbase/meta/1588230740 2024-11-13T13:35:41,805 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/hbase/meta/1588230740 2024-11-13T13:35:41,806 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T13:35:41,806 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T13:35:41,807 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T13:35:41,808 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T13:35:41,809 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=795300, jitterRate=0.011276766657829285}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T13:35:41,809 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T13:35:41,809 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731504941797Writing region info on filesystem at 1731504941797Initializing all the Stores at 1731504941798 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504941798Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504941798Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731504941798Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504941798Cleaning up temporary data from old regions at 1731504941806 (+8 ms)Running coprocessor post-open hooks at 1731504941809 (+3 ms)Region opened successfully at 1731504941809 2024-11-13T13:35:41,810 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731504941779 2024-11-13T13:35:41,812 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T13:35:41,813 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T13:35:41,813 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=bfeb2336aed7,45505,1731504940789 2024-11-13T13:35:41,814 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bfeb2336aed7,45505,1731504940789, state=OPEN 2024-11-13T13:35:41,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T13:35:41,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x1013468a1810001, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T13:35:41,855 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=bfeb2336aed7,45505,1731504940789 2024-11-13T13:35:41,855 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T13:35:41,855 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T13:35:41,860 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T13:35:41,860 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=bfeb2336aed7,45505,1731504940789 in 232 msec 2024-11-13T13:35:41,863 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-13T13:35:41,863 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 668 msec 2024-11-13T13:35:41,864 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T13:35:41,865 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T13:35:41,866 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T13:35:41,866 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bfeb2336aed7,45505,1731504940789, seqNum=-1] 2024-11-13T13:35:41,867 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T13:35:41,868 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58161, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T13:35:41,876 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 721 msec 2024-11-13T13:35:41,876 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731504941876, completionTime=-1 2024-11-13T13:35:41,876 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-13T13:35:41,876 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-13T13:35:41,878 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-13T13:35:41,878 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731505001878 2024-11-13T13:35:41,878 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731505061878 2024-11-13T13:35:41,879 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-13T13:35:41,879 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,36873,1731504940518-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T13:35:41,879 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,36873,1731504940518-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:35:41,879 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,36873,1731504940518-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:35:41,879 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-bfeb2336aed7:36873, period=300000, unit=MILLISECONDS is enabled. 
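The ChoreService entries above record the master's periodic background tasks (ClusterStatusChore, BalancerChore, RegionNormalizerChore, CatalogJanitor) being enabled with their periods. A minimal sketch of how such a chore is defined and scheduled is below; HeartbeatChore, its 60-second period, and the "example" pool prefix are hypothetical, and only the ScheduledChore/ChoreService types appear in the log.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    final class HeartbeatChore extends ScheduledChore {
      HeartbeatChore(Stoppable stopper) {
        super("HeartbeatChore", stopper, 60_000); // name, stopper, period in ms
      }
      @Override
      protected void chore() {
        // periodic work would go here
      }
    }

    // Scheduling (the Stoppable is supplied by the hosting service):
    //   ChoreService service = new ChoreService("example");
    //   service.scheduleChore(new HeartbeatChore(stopper));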
2024-11-13T13:35:41,879 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-13T13:35:41,880 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T13:35:41,882 DEBUG [master/bfeb2336aed7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T13:35:41,885 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.021sec 2024-11-13T13:35:41,885 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T13:35:41,885 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T13:35:41,885 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-13T13:35:41,885 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-13T13:35:41,885 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T13:35:41,885 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,36873,1731504940518-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T13:35:41,886 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,36873,1731504940518-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T13:35:41,889 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T13:35:41,889 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T13:35:41,889 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,36873,1731504940518-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T13:35:41,925 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6beed7e8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T13:35:41,925 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request bfeb2336aed7,36873,-1 for getting cluster id 2024-11-13T13:35:41,925 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T13:35:41,926 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7b9a6952-257e-4e9e-a082-65fa869e767b' 2024-11-13T13:35:41,927 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T13:35:41,927 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7b9a6952-257e-4e9e-a082-65fa869e767b" 2024-11-13T13:35:41,927 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60da48cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T13:35:41,927 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [bfeb2336aed7,36873,-1] 2024-11-13T13:35:41,927 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T13:35:41,927 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:35:41,929 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47684, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T13:35:41,929 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61726e31, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T13:35:41,930 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T13:35:41,931 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bfeb2336aed7,45505,1731504940789, seqNum=-1] 2024-11-13T13:35:41,931 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T13:35:41,933 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38190, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T13:35:41,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=bfeb2336aed7,36873,1731504940518 2024-11-13T13:35:41,936 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:35:41,939 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-13T13:35:41,939 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-13T13:35:41,939 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-13T13:35:41,940 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-13T13:35:41,941 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is bfeb2336aed7,36873,1731504940518 2024-11-13T13:35:41,941 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5a2e7a96 2024-11-13T13:35:41,941 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-13T13:35:41,943 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47692, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-13T13:35:41,944 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36873 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-13T13:35:41,944 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36873 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
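The two TableDescriptorChecker warnings above are expected here: hbase.hregion.max.filesize (786432) and hbase.hregion.memstore.flush.size (8192) appear to be set deliberately small by the test so that flushes and log rolls are triggered quickly. A sketch of setting those values on a test Configuration, assuming the standard HBaseConfiguration entry point; sizes this small are unsuitable outside tests.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class SmallRegionTestConf {
      // The tiny sizes flagged by TableDescriptorChecker above; test use only.
      public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.max.filesize", 786432L);      // default is 10 GB
        conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // default is 128 MB
        return conf;
      }
    }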
2024-11-13T13:35:41,944 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36873 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T13:35:41,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36873 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-13T13:35:41,947 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T13:35:41,947 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:35:41,947 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36873 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-13T13:35:41,948 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T13:35:41,948 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-13T13:35:41,948 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T13:35:41,948 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-13T13:35:41,949 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T13:35:41,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36873 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T13:35:41,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36967 is added to blk_1073741835_1011 (size=395) 2024-11-13T13:35:41,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741835_1011 (size=395) 2024-11-13T13:35:41,957 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1b0b3fe3cfd275f72fe8b5466cb336bf, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731504941944.1b0b3fe3cfd275f72fe8b5466cb336bf.', STARTKEY => '', ENDKEY => ''}, 
tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2 2024-11-13T13:35:41,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36967 is added to blk_1073741836_1012 (size=78) 2024-11-13T13:35:41,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741836_1012 (size=78) 2024-11-13T13:35:41,964 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731504941944.1b0b3fe3cfd275f72fe8b5466cb336bf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:35:41,964 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 1b0b3fe3cfd275f72fe8b5466cb336bf, disabling compactions & flushes 2024-11-13T13:35:41,964 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731504941944.1b0b3fe3cfd275f72fe8b5466cb336bf. 2024-11-13T13:35:41,964 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731504941944.1b0b3fe3cfd275f72fe8b5466cb336bf. 2024-11-13T13:35:41,964 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731504941944.1b0b3fe3cfd275f72fe8b5466cb336bf. after waiting 0 ms 2024-11-13T13:35:41,964 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731504941944.1b0b3fe3cfd275f72fe8b5466cb336bf. 2024-11-13T13:35:41,964 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731504941944.1b0b3fe3cfd275f72fe8b5466cb336bf. 
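The create request above defines TestLogRolling-testLogRollOnPipelineRestart with a single 'info' family keeping one version. A minimal client-side equivalent using the Admin API is sketched below; the connection setup is assumed and not part of the log, and only the table name and family settings are taken from the descriptor above.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateTestTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1) // VERSIONS => '1' in the descriptor above
                  .build())
              .build());
        }
      }
    }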
2024-11-13T13:35:41,965 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1b0b3fe3cfd275f72fe8b5466cb336bf: Waiting for close lock at 1731504941964Disabling compacts and flushes for region at 1731504941964Disabling writes for close at 1731504941964Writing region close event to WAL at 1731504941964Closed at 1731504941964 2024-11-13T13:35:41,966 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T13:35:41,966 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731504941944.1b0b3fe3cfd275f72fe8b5466cb336bf.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731504941966"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731504941966"}]},"ts":"1731504941966"} 2024-11-13T13:35:41,969 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-13T13:35:41,970 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T13:35:41,970 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731504941970"}]},"ts":"1731504941970"} 2024-11-13T13:35:41,973 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-13T13:35:41,973 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=1b0b3fe3cfd275f72fe8b5466cb336bf, ASSIGN}] 2024-11-13T13:35:41,974 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=1b0b3fe3cfd275f72fe8b5466cb336bf, ASSIGN 2024-11-13T13:35:41,975 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=1b0b3fe3cfd275f72fe8b5466cb336bf, ASSIGN; state=OFFLINE, location=bfeb2336aed7,45505,1731504940789; forceNewPlan=false, retain=false 2024-11-13T13:35:42,075 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:42,079 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:42,126 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1b0b3fe3cfd275f72fe8b5466cb336bf, regionState=OPENING, regionLocation=bfeb2336aed7,45505,1731504940789 2024-11-13T13:35:42,130 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=1b0b3fe3cfd275f72fe8b5466cb336bf, ASSIGN because future has completed 2024-11-13T13:35:42,130 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1b0b3fe3cfd275f72fe8b5466cb336bf, server=bfeb2336aed7,45505,1731504940789}] 2024-11-13T13:35:42,289 INFO [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731504941944.1b0b3fe3cfd275f72fe8b5466cb336bf. 2024-11-13T13:35:42,289 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 1b0b3fe3cfd275f72fe8b5466cb336bf, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731504941944.1b0b3fe3cfd275f72fe8b5466cb336bf.', STARTKEY => '', ENDKEY => ''} 2024-11-13T13:35:42,289 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 1b0b3fe3cfd275f72fe8b5466cb336bf 2024-11-13T13:35:42,290 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731504941944.1b0b3fe3cfd275f72fe8b5466cb336bf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:35:42,290 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 1b0b3fe3cfd275f72fe8b5466cb336bf 2024-11-13T13:35:42,290 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 1b0b3fe3cfd275f72fe8b5466cb336bf 2024-11-13T13:35:42,293 INFO [StoreOpener-1b0b3fe3cfd275f72fe8b5466cb336bf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1b0b3fe3cfd275f72fe8b5466cb336bf 2024-11-13T13:35:42,295 INFO [StoreOpener-1b0b3fe3cfd275f72fe8b5466cb336bf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 
EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1b0b3fe3cfd275f72fe8b5466cb336bf columnFamilyName info 2024-11-13T13:35:42,296 DEBUG [StoreOpener-1b0b3fe3cfd275f72fe8b5466cb336bf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:35:42,296 INFO [StoreOpener-1b0b3fe3cfd275f72fe8b5466cb336bf-1 {}] regionserver.HStore(327): Store=1b0b3fe3cfd275f72fe8b5466cb336bf/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T13:35:42,296 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 1b0b3fe3cfd275f72fe8b5466cb336bf 2024-11-13T13:35:42,297 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/default/TestLogRolling-testLogRollOnPipelineRestart/1b0b3fe3cfd275f72fe8b5466cb336bf 2024-11-13T13:35:42,297 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/default/TestLogRolling-testLogRollOnPipelineRestart/1b0b3fe3cfd275f72fe8b5466cb336bf 2024-11-13T13:35:42,298 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 1b0b3fe3cfd275f72fe8b5466cb336bf 2024-11-13T13:35:42,298 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 1b0b3fe3cfd275f72fe8b5466cb336bf 2024-11-13T13:35:42,300 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 1b0b3fe3cfd275f72fe8b5466cb336bf 2024-11-13T13:35:42,302 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/default/TestLogRolling-testLogRollOnPipelineRestart/1b0b3fe3cfd275f72fe8b5466cb336bf/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T13:35:42,302 INFO [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 1b0b3fe3cfd275f72fe8b5466cb336bf; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=846040, jitterRate=0.07579566538333893}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T13:35:42,302 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegion(1122): Running coprocessor post-open hooks for 1b0b3fe3cfd275f72fe8b5466cb336bf 2024-11-13T13:35:42,303 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 1b0b3fe3cfd275f72fe8b5466cb336bf: Running coprocessor pre-open hook at 1731504942290Writing region info on filesystem at 1731504942290Initializing all the Stores at 1731504942292 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731504942292Cleaning up temporary data from old regions at 1731504942298 (+6 ms)Running coprocessor post-open hooks at 1731504942302 (+4 ms)Region opened successfully at 1731504942303 (+1 ms) 2024-11-13T13:35:42,304 INFO [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731504941944.1b0b3fe3cfd275f72fe8b5466cb336bf., pid=6, masterSystemTime=1731504942283 2024-11-13T13:35:42,306 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1731504941944.1b0b3fe3cfd275f72fe8b5466cb336bf. 2024-11-13T13:35:42,306 INFO [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731504941944.1b0b3fe3cfd275f72fe8b5466cb336bf. 
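The recurring "Failed invocation ... Filesystem closed" warnings before and after this point come from RecoverLeaseFSUtils repeatedly probing whether WAL files under an earlier test-data directory are closed, while the DFSClient that would answer reports "Filesystem closed". A simplified sketch of the recoverLease/isFileClosed polling pattern that utility wraps is below; the retry count and sleep interval are arbitrary and not taken from the log.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {
      // Simplified recoverLease/isFileClosed loop; RecoverLeaseFSUtils adds
      // timeouts, pauses and reflection around the same two HDFS calls.
      public static boolean recover(DistributedFileSystem dfs, Path wal) throws Exception {
        if (dfs.recoverLease(wal)) {
          return true;                 // lease recovered immediately
        }
        for (int i = 0; i < 10; i++) {
          Thread.sleep(1000L);
          if (dfs.isFileClosed(wal)) { // the call that fails in the stack traces above
            return true;
          }
        }
        return false;
      }
    }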
2024-11-13T13:35:42,307 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1b0b3fe3cfd275f72fe8b5466cb336bf, regionState=OPEN, openSeqNum=2, regionLocation=bfeb2336aed7,45505,1731504940789 2024-11-13T13:35:42,309 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1b0b3fe3cfd275f72fe8b5466cb336bf, server=bfeb2336aed7,45505,1731504940789 because future has completed 2024-11-13T13:35:42,313 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-13T13:35:42,313 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 1b0b3fe3cfd275f72fe8b5466cb336bf, server=bfeb2336aed7,45505,1731504940789 in 181 msec 2024-11-13T13:35:42,316 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-13T13:35:42,316 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=1b0b3fe3cfd275f72fe8b5466cb336bf, ASSIGN in 340 msec 2024-11-13T13:35:42,317 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T13:35:42,317 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731504942317"}]},"ts":"1731504942317"} 2024-11-13T13:35:42,319 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-13T13:35:42,320 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T13:35:42,322 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 376 msec 2024-11-13T13:35:43,076 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:43,080 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:44,077 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:44,081 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:45,078 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:45,082 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:46,080 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:46,083 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:46,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:35:46,815 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:35:46,815 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:35:46,816 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:35:46,816 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:35:46,816 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:35:46,820 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:35:46,820 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:35:46,820 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:35:46,822 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:35:47,081 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:47,084 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:47,326 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T13:35:47,347 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:35:47,347 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:35:47,347 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:35:47,347 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:35:47,347 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:35:47,348 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:35:47,351 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:35:47,351 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:35:47,351 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:35:47,354 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:35:47,359 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-13T13:35:47,360 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-13T13:35:48,083 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:48,085 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:49,084 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:49,086 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:50,084 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:50,087 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:51,085 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:51,088 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:51,948 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-13T13:35:51,948 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-13T13:35:51,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36873 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T13:35:51,972 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-13T13:35:51,972 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-13T13:35:51,977 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-13T13:35:51,977 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731504941944.1b0b3fe3cfd275f72fe8b5466cb336bf. 2024-11-13T13:35:51,982 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731504941944.1b0b3fe3cfd275f72fe8b5466cb336bf., hostname=bfeb2336aed7,45505,1731504940789, seqNum=2] 2024-11-13T13:35:52,086 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:52,088 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:35:53,086 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:53,089 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:53,986 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504941418 2024-11-13T13:35:53,986 WARN [ResponseProcessor for block BP-1348924368-172.17.0.2-1731504937923:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1348924368-172.17.0.2-1731504937923:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:53,986 WARN [ResponseProcessor for block BP-1348924368-172.17.0.2-1731504937923:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1348924368-172.17.0.2-1731504937923:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1348924368-172.17.0.2-1731504937923:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:42613,DS-6e6cf97b-09b5-430b-92bc-37ad8ab6214f,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:53,987 WARN [DataStreamer for file /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/WALs/bfeb2336aed7,36873,1731504940518/bfeb2336aed7%2C36873%2C1731504940518.1731504940937 block BP-1348924368-172.17.0.2-1731504937923:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1348924368-172.17.0.2-1731504937923:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36967,DS-ab9a2161-65c5-434f-8fcb-2eae2752a222,DISK], DatanodeInfoWithStorage[127.0.0.1:42613,DS-6e6cf97b-09b5-430b-92bc-37ad8ab6214f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42613,DS-6e6cf97b-09b5-430b-92bc-37ad8ab6214f,DISK]) is bad. 
2024-11-13T13:35:53,987 WARN [DataStreamer for file /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.meta.1731504941786.meta block BP-1348924368-172.17.0.2-1731504937923:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1348924368-172.17.0.2-1731504937923:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42613,DS-6e6cf97b-09b5-430b-92bc-37ad8ab6214f,DISK], DatanodeInfoWithStorage[127.0.0.1:36967,DS-ab9a2161-65c5-434f-8fcb-2eae2752a222,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42613,DS-6e6cf97b-09b5-430b-92bc-37ad8ab6214f,DISK]) is bad. 2024-11-13T13:35:53,987 WARN [PacketResponder: BP-1348924368-172.17.0.2-1731504937923:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:42613] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:53,988 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1647991966_22 at /127.0.0.1:35836 [Receiving block BP-1348924368-172.17.0.2-1731504937923:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35836 dst: /127.0.0.1:36967 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:53,988 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1003325979_22 at /127.0.0.1:56640 [Receiving block BP-1348924368-172.17.0.2-1731504937923:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:42613:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56640 dst: /127.0.0.1:42613 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T13:35:53,988 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1003325979_22 at /127.0.0.1:35882 [Receiving block BP-1348924368-172.17.0.2-1731504937923:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35882 dst: /127.0.0.1:36967 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:53,988 WARN [ResponseProcessor for block BP-1348924368-172.17.0.2-1731504937923:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1348924368-172.17.0.2-1731504937923:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1348924368-172.17.0.2-1731504937923:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:42613,DS-6e6cf97b-09b5-430b-92bc-37ad8ab6214f,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:53,988 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1647991966_22 at /127.0.0.1:56584 [Receiving block BP-1348924368-172.17.0.2-1731504937923:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:42613:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56584 dst: /127.0.0.1:42613 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:53,988 WARN [DataStreamer for file /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504941418 block BP-1348924368-172.17.0.2-1731504937923:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1348924368-172.17.0.2-1731504937923:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36967,DS-ab9a2161-65c5-434f-8fcb-2eae2752a222,DISK], DatanodeInfoWithStorage[127.0.0.1:42613,DS-6e6cf97b-09b5-430b-92bc-37ad8ab6214f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42613,DS-6e6cf97b-09b5-430b-92bc-37ad8ab6214f,DISK]) is bad. 2024-11-13T13:35:53,988 WARN [PacketResponder: BP-1348924368-172.17.0.2-1731504937923:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:42613] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:53,989 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1003325979_22 at /127.0.0.1:35878 [Receiving block BP-1348924368-172.17.0.2-1731504937923:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35878 dst: /127.0.0.1:36967 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:53,989 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1003325979_22 at /127.0.0.1:56624 [Receiving block BP-1348924368-172.17.0.2-1731504937923:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:42613:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56624 dst: /127.0.0.1:42613 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:54,029 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@37687418{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:35:54,030 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e7f214b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T13:35:54,030 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T13:35:54,030 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47946b20{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T13:35:54,030 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64434c96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/hadoop.log.dir/,STOPPED} 2024-11-13T13:35:54,032 WARN [BP-1348924368-172.17.0.2-1731504937923 heartbeating to localhost/127.0.0.1:46467 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T13:35:54,032 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T13:35:54,032 WARN [BP-1348924368-172.17.0.2-1731504937923 heartbeating to localhost/127.0.0.1:46467 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1348924368-172.17.0.2-1731504937923 (Datanode Uuid 3a717171-4bca-4788-8f6a-d577ec78ab7f) service to localhost/127.0.0.1:46467 2024-11-13T13:35:54,032 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T13:35:54,032 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/cluster_7d13f452-3b8c-c473-f020-215ab3c51fcd/data/data3/current/BP-1348924368-172.17.0.2-1731504937923 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:35:54,032 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/cluster_7d13f452-3b8c-c473-f020-215ab3c51fcd/data/data4/current/BP-1348924368-172.17.0.2-1731504937923 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:35:54,033 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T13:35:54,042 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:35:54,047 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T13:35:54,047 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T13:35:54,047 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T13:35:54,048 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T13:35:54,048 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2460467f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/hadoop.log.dir/,AVAILABLE} 2024-11-13T13:35:54,049 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75f58649{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T13:35:54,087 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:54,091 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:54,159 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c5aa216{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/java.io.tmpdir/jetty-localhost-36439-hadoop-hdfs-3_4_1-tests_jar-_-any-794230000136955219/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:35:54,160 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@46332285{HTTP/1.1, (http/1.1)}{localhost:36439} 2024-11-13T13:35:54,160 INFO [Time-limited test {}] server.Server(415): Started @168847ms 2024-11-13T13:35:54,161 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T13:35:54,221 WARN [ResponseProcessor for block BP-1348924368-172.17.0.2-1731504937923:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1348924368-172.17.0.2-1731504937923:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:54,221 WARN [ResponseProcessor for block BP-1348924368-172.17.0.2-1731504937923:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1348924368-172.17.0.2-1731504937923:blk_1073741834_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:54,221 WARN [ResponseProcessor for block BP-1348924368-172.17.0.2-1731504937923:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1348924368-172.17.0.2-1731504937923:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:35:54,222 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1003325979_22 at /127.0.0.1:37916 [Receiving block BP-1348924368-172.17.0.2-1731504937923:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37916 dst: /127.0.0.1:36967 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:54,222 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1003325979_22 at /127.0.0.1:37902 [Receiving block BP-1348924368-172.17.0.2-1731504937923:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37902 dst: /127.0.0.1:36967 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:54,222 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1647991966_22 at /127.0.0.1:37918 [Receiving block BP-1348924368-172.17.0.2-1731504937923:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36967:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37918 dst: /127.0.0.1:36967 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:54,233 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5fb42f9a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:35:54,234 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6d74671d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T13:35:54,234 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T13:35:54,234 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@13646a74{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T13:35:54,234 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1fc8bed8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/hadoop.log.dir/,STOPPED} 2024-11-13T13:35:54,236 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T13:35:54,236 WARN [BP-1348924368-172.17.0.2-1731504937923 heartbeating to localhost/127.0.0.1:46467 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T13:35:54,236 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T13:35:54,236 WARN [BP-1348924368-172.17.0.2-1731504937923 heartbeating to localhost/127.0.0.1:46467 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1348924368-172.17.0.2-1731504937923 (Datanode Uuid 0cea774c-18a7-48aa-896e-30dea8c1d062) service to localhost/127.0.0.1:46467 2024-11-13T13:35:54,237 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/cluster_7d13f452-3b8c-c473-f020-215ab3c51fcd/data/data1/current/BP-1348924368-172.17.0.2-1731504937923 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:35:54,237 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/cluster_7d13f452-3b8c-c473-f020-215ab3c51fcd/data/data2/current/BP-1348924368-172.17.0.2-1731504937923 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:35:54,237 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T13:35:54,262 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:35:54,307 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T13:35:54,312 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T13:35:54,312 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T13:35:54,312 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T13:35:54,313 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75ed142f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/hadoop.log.dir/,AVAILABLE} 2024-11-13T13:35:54,313 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ede944f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T13:35:54,427 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7180ac25{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/java.io.tmpdir/jetty-localhost-38299-hadoop-hdfs-3_4_1-tests_jar-_-any-7914283579104635282/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:35:54,427 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@248d0d6a{HTTP/1.1, (http/1.1)}{localhost:38299} 2024-11-13T13:35:54,428 INFO [Time-limited test {}] server.Server(415): Started @169115ms 2024-11-13T13:35:54,429 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T13:35:54,705 WARN [Thread-1337 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T13:35:54,708 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb741a4c1a0480f36 with lease ID 0x2b905e81678a495d: from storage DS-6e6cf97b-09b5-430b-92bc-37ad8ab6214f node DatanodeRegistration(127.0.0.1:35139, datanodeUuid=3a717171-4bca-4788-8f6a-d577ec78ab7f, infoPort=39067, infoSecurePort=0, ipcPort=37473, storageInfo=lv=-57;cid=testClusterID;nsid=1427826847;c=1731504937923), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:35:54,708 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb741a4c1a0480f36 with lease ID 0x2b905e81678a495d: from storage DS-0c2e36f5-bdf3-40e8-91b1-b4a686944100 node DatanodeRegistration(127.0.0.1:35139, datanodeUuid=3a717171-4bca-4788-8f6a-d577ec78ab7f, infoPort=39067, infoSecurePort=0, ipcPort=37473, storageInfo=lv=-57;cid=testClusterID;nsid=1427826847;c=1731504937923), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:35:54,965 WARN [Thread-1357 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T13:35:54,968 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9820a96cd6553d45 with lease ID 0x2b905e81678a495e: from storage DS-ab9a2161-65c5-434f-8fcb-2eae2752a222 node DatanodeRegistration(127.0.0.1:39479, datanodeUuid=0cea774c-18a7-48aa-896e-30dea8c1d062, infoPort=42621, infoSecurePort=0, ipcPort=37153, storageInfo=lv=-57;cid=testClusterID;nsid=1427826847;c=1731504937923), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-13T13:35:54,968 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9820a96cd6553d45 with lease ID 0x2b905e81678a495e: from storage DS-657cc954-4f23-4958-bcf2-ac3127d0b3fb node DatanodeRegistration(127.0.0.1:39479, datanodeUuid=0cea774c-18a7-48aa-896e-30dea8c1d062, infoPort=42621, infoSecurePort=0, ipcPort=37153, storageInfo=lv=-57;cid=testClusterID;nsid=1427826847;c=1731504937923), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:35:55,088 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:55,092 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:35:55,466 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-13T13:35:55,470 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-13T13:35:55,472 ERROR [FSHLog-0-hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2-prefix:bfeb2336aed7,45505,1731504940789 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36967,DS-ab9a2161-65c5-434f-8fcb-2eae2752a222,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:55,472 WARN [FSHLog-0-hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2-prefix:bfeb2336aed7,45505,1731504940789 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36967,DS-ab9a2161-65c5-434f-8fcb-2eae2752a222,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:35:55,472 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog bfeb2336aed7%2C45505%2C1731504940789:(num 1731504941418) roll requested 2024-11-13T13:35:55,472 INFO [regionserver/bfeb2336aed7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C45505%2C1731504940789.1731504955472 2024-11-13T13:35:55,490 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504941418 newFile=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504955472 2024-11-13T13:35:55,491 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:55,491 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:55,491 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:55,491 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:55,491 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:35:55,492 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504941418 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504955472 2024-11-13T13:35:55,492 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36967,DS-ab9a2161-65c5-434f-8fcb-2eae2752a222,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:55,492 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36967,DS-ab9a2161-65c5-434f-8fcb-2eae2752a222,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:35:55,492 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504941418 2024-11-13T13:35:55,493 WARN [IPC Server handler 0 on default port 46467 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504941418 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-13T13:35:55,494 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504941418 after 2ms 2024-11-13T13:35:55,500 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42621:42621),(127.0.0.1/127.0.0.1:39067:39067)] 2024-11-13T13:35:55,501 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504941418 is not closed yet, will try archiving it next time 2024-11-13T13:35:56,089 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:56,093 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:56,708 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-13T13:35:57,090 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:57,094 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:57,505 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-13T13:35:58,090 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:58,094 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:59,091 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:59,095 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:35:59,495 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504941418 after 4003ms 2024-11-13T13:35:59,509 WARN [ResponseProcessor for block BP-1348924368-172.17.0.2-1731504937923:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1348924368-172.17.0.2-1731504937923:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:59,509 WARN [DataStreamer for file /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504955472 block BP-1348924368-172.17.0.2-1731504937923:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1348924368-172.17.0.2-1731504937923:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39479,DS-ab9a2161-65c5-434f-8fcb-2eae2752a222,DISK], DatanodeInfoWithStorage[127.0.0.1:35139,DS-6e6cf97b-09b5-430b-92bc-37ad8ab6214f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39479,DS-ab9a2161-65c5-434f-8fcb-2eae2752a222,DISK]) is bad. 2024-11-13T13:35:59,509 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1003325979_22 at /127.0.0.1:51922 [Receiving block BP-1348924368-172.17.0.2-1731504937923:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:35139:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51922 dst: /127.0.0.1:35139 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:59,509 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1003325979_22 at /127.0.0.1:41180 [Receiving block BP-1348924368-172.17.0.2-1731504937923:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:39479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41180 dst: /127.0.0.1:39479 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T13:35:59,597 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7180ac25{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:35:59,598 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@248d0d6a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T13:35:59,598 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T13:35:59,598 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ede944f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T13:35:59,598 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75ed142f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/hadoop.log.dir/,STOPPED} 2024-11-13T13:35:59,600 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-13T13:35:59,600 WARN [BP-1348924368-172.17.0.2-1731504937923 heartbeating to localhost/127.0.0.1:46467 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T13:35:59,600 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T13:35:59,600 WARN [BP-1348924368-172.17.0.2-1731504937923 heartbeating to localhost/127.0.0.1:46467 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1348924368-172.17.0.2-1731504937923 (Datanode Uuid 0cea774c-18a7-48aa-896e-30dea8c1d062) service to localhost/127.0.0.1:46467 2024-11-13T13:35:59,600 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/cluster_7d13f452-3b8c-c473-f020-215ab3c51fcd/data/data1/current/BP-1348924368-172.17.0.2-1731504937923 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:35:59,601 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/cluster_7d13f452-3b8c-c473-f020-215ab3c51fcd/data/data2/current/BP-1348924368-172.17.0.2-1731504937923 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:35:59,601 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T13:35:59,610 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:35:59,613 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T13:35:59,614 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T13:35:59,614 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T13:35:59,614 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T13:35:59,615 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19016e01{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/hadoop.log.dir/,AVAILABLE} 2024-11-13T13:35:59,615 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ef50a45{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T13:35:59,721 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6db938{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/java.io.tmpdir/jetty-localhost-33613-hadoop-hdfs-3_4_1-tests_jar-_-any-15215070392607701477/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:35:59,721 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5cf288ef{HTTP/1.1, (http/1.1)}{localhost:33613} 2024-11-13T13:35:59,721 INFO [Time-limited test {}] server.Server(415): Started @174409ms 2024-11-13T13:35:59,722 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T13:35:59,756 WARN [ResponseProcessor for block BP-1348924368-172.17.0.2-1731504937923:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1348924368-172.17.0.2-1731504937923:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:35:59,757 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1003325979_22 at /127.0.0.1:51938 [Receiving block BP-1348924368-172.17.0.2-1731504937923:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:35139:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51938 dst: /127.0.0.1:35139 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:35:59,767 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c5aa216{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:35:59,768 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@46332285{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T13:35:59,768 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T13:35:59,768 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75f58649{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T13:35:59,768 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2460467f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/hadoop.log.dir/,STOPPED} 2024-11-13T13:35:59,769 WARN [BP-1348924368-172.17.0.2-1731504937923 heartbeating to localhost/127.0.0.1:46467 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T13:35:59,769 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T13:35:59,769 WARN [BP-1348924368-172.17.0.2-1731504937923 heartbeating to localhost/127.0.0.1:46467 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1348924368-172.17.0.2-1731504937923 (Datanode Uuid 3a717171-4bca-4788-8f6a-d577ec78ab7f) service to localhost/127.0.0.1:46467 2024-11-13T13:35:59,769 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T13:35:59,770 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T13:35:59,770 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/cluster_7d13f452-3b8c-c473-f020-215ab3c51fcd/data/data3/current/BP-1348924368-172.17.0.2-1731504937923 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:35:59,770 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/cluster_7d13f452-3b8c-c473-f020-215ab3c51fcd/data/data4/current/BP-1348924368-172.17.0.2-1731504937923 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:35:59,778 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:35:59,781 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T13:35:59,785 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T13:35:59,785 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T13:35:59,785 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T13:35:59,786 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c39138a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/hadoop.log.dir/,AVAILABLE} 2024-11-13T13:35:59,786 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f3e5a16{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T13:35:59,893 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@41b9791e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/java.io.tmpdir/jetty-localhost-34801-hadoop-hdfs-3_4_1-tests_jar-_-any-15279393654486448068/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:35:59,893 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1240a3cc{HTTP/1.1, 
(http/1.1)}{localhost:34801} 2024-11-13T13:35:59,893 INFO [Time-limited test {}] server.Server(415): Started @174581ms 2024-11-13T13:35:59,895 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T13:36:00,092 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:00,095 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:00,345 WARN [Thread-1411 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T13:36:00,347 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x620effe013abc65f with lease ID 0x2b905e81678a495f: from storage DS-ab9a2161-65c5-434f-8fcb-2eae2752a222 node DatanodeRegistration(127.0.0.1:46417, datanodeUuid=0cea774c-18a7-48aa-896e-30dea8c1d062, infoPort=46861, infoSecurePort=0, ipcPort=36327, storageInfo=lv=-57;cid=testClusterID;nsid=1427826847;c=1731504937923), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:36:00,348 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x620effe013abc65f with lease ID 0x2b905e81678a495f: from storage DS-657cc954-4f23-4958-bcf2-ac3127d0b3fb node DatanodeRegistration(127.0.0.1:46417, datanodeUuid=0cea774c-18a7-48aa-896e-30dea8c1d062, infoPort=46861, infoSecurePort=0, ipcPort=36327, storageInfo=lv=-57;cid=testClusterID;nsid=1427826847;c=1731504937923), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:36:00,505 WARN [Thread-1431 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T13:36:00,507 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbb9f5dc5079b534f with lease ID 0x2b905e81678a4960: from storage DS-6e6cf97b-09b5-430b-92bc-37ad8ab6214f node DatanodeRegistration(127.0.0.1:43769, datanodeUuid=3a717171-4bca-4788-8f6a-d577ec78ab7f, infoPort=37527, infoSecurePort=0, ipcPort=42553, storageInfo=lv=-57;cid=testClusterID;nsid=1427826847;c=1731504937923), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:36:00,507 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbb9f5dc5079b534f with lease ID 0x2b905e81678a4960: from storage DS-0c2e36f5-bdf3-40e8-91b1-b4a686944100 node DatanodeRegistration(127.0.0.1:43769, datanodeUuid=3a717171-4bca-4788-8f6a-d577ec78ab7f, infoPort=37527, infoSecurePort=0, ipcPort=42553, storageInfo=lv=-57;cid=testClusterID;nsid=1427826847;c=1731504937923), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:36:00,913 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-13T13:36:00,915 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-13T13:36:00,916 ERROR [FSHLog-0-hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2-prefix:bfeb2336aed7,45505,1731504940789 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35139,DS-6e6cf97b-09b5-430b-92bc-37ad8ab6214f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:36:00,916 WARN [FSHLog-0-hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2-prefix:bfeb2336aed7,45505,1731504940789 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35139,DS-6e6cf97b-09b5-430b-92bc-37ad8ab6214f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:36:00,916 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog bfeb2336aed7%2C45505%2C1731504940789:(num 1731504955472) roll requested 2024-11-13T13:36:00,917 INFO [regionserver/bfeb2336aed7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C45505%2C1731504940789.1731504960917 2024-11-13T13:36:00,925 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504955472 newFile=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504960917 2024-11-13T13:36:00,925 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:00,925 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:00,925 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:00,925 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:00,925 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:00,925 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504955472 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504960917 2024-11-13T13:36:00,925 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35139,DS-6e6cf97b-09b5-430b-92bc-37ad8ab6214f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:36:00,926 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35139,DS-6e6cf97b-09b5-430b-92bc-37ad8ab6214f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:36:00,926 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504955472 2024-11-13T13:36:00,926 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46861:46861),(127.0.0.1/127.0.0.1:37527:37527)] 2024-11-13T13:36:00,926 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504955472 is not closed yet, will try archiving it next time 2024-11-13T13:36:00,926 WARN [IPC Server handler 1 on default port 46467 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504955472 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-13T13:36:00,926 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504955472 after 0ms 2024-11-13T13:36:01,092 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:01,096 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:02,093 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:02,096 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:02,928 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C45505%2C1731504940789.1731504962927 2024-11-13T13:36:02,935 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504960917 newFile=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504962927 2024-11-13T13:36:02,935 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:02,935 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:02,936 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:02,936 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:02,936 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:02,936 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504960917 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504962927 2024-11-13T13:36:02,937 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37527:37527),(127.0.0.1/127.0.0.1:46861:46861)] 2024-11-13T13:36:02,938 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504955472 is not closed yet, will try archiving it next time 2024-11-13T13:36:02,938 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504960917 is not closed yet, will try archiving it next time 2024-11-13T13:36:02,938 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504941418 2024-11-13T13:36:02,938 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504941418 2024-11-13T13:36:02,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43769 is added to blk_1073741838_1019 (size=1264) 2024-11-13T13:36:02,939 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46417 is added to blk_1073741838_1019 (size=1264) 2024-11-13T13:36:02,939 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504941418 after 1ms 2024-11-13T13:36:02,940 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504941418 2024-11-13T13:36:02,940 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504955472 is not closed yet, will try archiving it next time 2024-11-13T13:36:02,951 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731504942303/Put/vlen=218/seqid=0] 2024-11-13T13:36:02,951 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731504951983/Put/vlen=1045/seqid=0] 2024-11-13T13:36:02,951 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504941418 2024-11-13T13:36:02,952 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504955472 2024-11-13T13:36:02,952 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504955472 2024-11-13T13:36:02,952 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504955472 after 0ms 2024-11-13T13:36:02,952 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504955472 2024-11-13T13:36:02,962 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731504955471/Put/vlen=1045/seqid=0] 2024-11-13T13:36:02,962 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731504957506/Put/vlen=1045/seqid=0] 2024-11-13T13:36:02,962 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504955472 2024-11-13T13:36:02,962 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504960917 2024-11-13T13:36:02,962 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file 
hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504960917 2024-11-13T13:36:02,963 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504960917 after 1ms 2024-11-13T13:36:02,963 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504960917 2024-11-13T13:36:02,966 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731504960916/Put/vlen=1045/seqid=0] 2024-11-13T13:36:02,966 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504962927 2024-11-13T13:36:02,966 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504962927 2024-11-13T13:36:02,967 WARN [IPC Server handler 1 on default port 46467 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504962927 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-13T13:36:02,967 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504962927 after 1ms 2024-11-13T13:36:03,094 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:03,097 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:36:03,508 WARN [ResponseProcessor for block BP-1348924368-172.17.0.2-1731504937923:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1348924368-172.17.0.2-1731504937923:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:36:03,508 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1647991966_22 at /127.0.0.1:52490 [Receiving block BP-1348924368-172.17.0.2-1731504937923:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:43769:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52490 dst: /127.0.0.1:43769 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:43769 remote=/127.0.0.1:52490]. Total timeout mills is 60000, 59427 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T13:36:03,508 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1647991966_22 at /127.0.0.1:49640 [Receiving block BP-1348924368-172.17.0.2-1731504937923:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:46417:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49640 dst: /127.0.0.1:46417 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:36:03,508 WARN [DataStreamer for file /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504962927 block BP-1348924368-172.17.0.2-1731504937923:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1348924368-172.17.0.2-1731504937923:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43769,DS-6e6cf97b-09b5-430b-92bc-37ad8ab6214f,DISK], DatanodeInfoWithStorage[127.0.0.1:46417,DS-ab9a2161-65c5-434f-8fcb-2eae2752a222,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43769,DS-6e6cf97b-09b5-430b-92bc-37ad8ab6214f,DISK]) is bad. 
2024-11-13T13:36:03,510 WARN [DataStreamer for file /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504962927 block BP-1348924368-172.17.0.2-1731504937923:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1348924368-172.17.0.2-1731504937923:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:36:03,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43769 is added to blk_1073741839_1022 (size=85) 2024-11-13T13:36:03,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46417 is added to blk_1073741839_1022 (size=85) 2024-11-13T13:36:04,095 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:04,097 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:04,927 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504955472 after 4001ms 2024-11-13T13:36:05,095 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:05,098 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:06,096 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:06,098 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:06,348 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-13T13:36:06,968 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504962927 after 4002ms 2024-11-13T13:36:06,968 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504962927 2024-11-13T13:36:06,973 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504962927 2024-11-13T13:36:06,974 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1b0b3fe3cfd275f72fe8b5466cb336bf 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-13T13:36:06,974 ERROR [FSHLog-0-hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2-prefix:bfeb2336aed7,45505,1731504940789 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1348924368-172.17.0.2-1731504937923:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:36:06,975 WARN [FSHLog-0-hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2-prefix:bfeb2336aed7,45505,1731504940789 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1348924368-172.17.0.2-1731504937923:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:36:06,975 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog bfeb2336aed7%2C45505%2C1731504940789:(num 1731504962927) roll requested 2024-11-13T13:36:06,976 INFO [regionserver/bfeb2336aed7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C45505%2C1731504940789.1731504966975 2024-11-13T13:36:06,991 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504962927 newFile=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504966975 2024-11-13T13:36:06,991 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:06,991 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:06,991 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:06,992 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:06,992 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:06,992 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504962927 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504966975 2024-11-13T13:36:06,992 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1348924368-172.17.0.2-1731504937923:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:36:06,992 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1348924368-172.17.0.2-1731504937923:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:36:06,993 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504962927 2024-11-13T13:36:06,993 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504962927 after 0ms 2024-11-13T13:36:06,996 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.1731504962927 to hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/oldWALs/bfeb2336aed7%2C45505%2C1731504940789.1731504962927 2024-11-13T13:36:06,997 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46861:46861),(127.0.0.1/127.0.0.1:37527:37527)] 2024-11-13T13:36:07,014 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/default/TestLogRolling-testLogRollOnPipelineRestart/1b0b3fe3cfd275f72fe8b5466cb336bf/.tmp/info/383ff2617b804b0eaa04e07cb1ae9385 is 1080, key is row1002/info:/1731504951983/Put/seqid=0 2024-11-13T13:36:07,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46417 is added to blk_1073741841_1024 (size=9270) 2024-11-13T13:36:07,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43769 is added to blk_1073741841_1024 (size=9270) 2024-11-13T13:36:07,022 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/default/TestLogRolling-testLogRollOnPipelineRestart/1b0b3fe3cfd275f72fe8b5466cb336bf/.tmp/info/383ff2617b804b0eaa04e07cb1ae9385 2024-11-13T13:36:07,030 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/default/TestLogRolling-testLogRollOnPipelineRestart/1b0b3fe3cfd275f72fe8b5466cb336bf/.tmp/info/383ff2617b804b0eaa04e07cb1ae9385 as hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/default/TestLogRolling-testLogRollOnPipelineRestart/1b0b3fe3cfd275f72fe8b5466cb336bf/info/383ff2617b804b0eaa04e07cb1ae9385 2024-11-13T13:36:07,037 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/default/TestLogRolling-testLogRollOnPipelineRestart/1b0b3fe3cfd275f72fe8b5466cb336bf/info/383ff2617b804b0eaa04e07cb1ae9385, entries=4, sequenceid=8, filesize=9.1 K 2024-11-13T13:36:07,038 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 1b0b3fe3cfd275f72fe8b5466cb336bf in 65ms, sequenceid=8, compaction requested=false 2024-11-13T13:36:07,038 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1b0b3fe3cfd275f72fe8b5466cb336bf: 2024-11-13T13:36:07,038 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-13T13:36:07,038 ERROR [FSHLog-0-hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2-prefix:bfeb2336aed7,45505,1731504940789.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36967,DS-ab9a2161-65c5-434f-8fcb-2eae2752a222,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:36:07,039 WARN [FSHLog-0-hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2-prefix:bfeb2336aed7,45505,1731504940789.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36967,DS-ab9a2161-65c5-434f-8fcb-2eae2752a222,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:36:07,039 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog bfeb2336aed7%2C45505%2C1731504940789.meta:.meta(num 1731504941786) roll requested 2024-11-13T13:36:07,039 INFO [regionserver/bfeb2336aed7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C45505%2C1731504940789.meta.1731504967039.meta 2024-11-13T13:36:07,044 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:07,044 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:07,044 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:07,044 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:07,044 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:07,044 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.meta.1731504941786.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.meta.1731504967039.meta 2024-11-13T13:36:07,045 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36967,DS-ab9a2161-65c5-434f-8fcb-2eae2752a222,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:36:07,045 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36967,DS-ab9a2161-65c5-434f-8fcb-2eae2752a222,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:36:07,045 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.meta.1731504941786.meta 2024-11-13T13:36:07,045 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46861:46861),(127.0.0.1/127.0.0.1:37527:37527)] 2024-11-13T13:36:07,045 DEBUG [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.meta.1731504941786.meta is not closed yet, will try archiving it next time 2024-11-13T13:36:07,046 WARN [IPC Server handler 0 on default port 46467 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.meta.1731504941786.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1013 2024-11-13T13:36:07,046 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.meta.1731504941786.meta after 1ms 2024-11-13T13:36:07,063 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/hbase/meta/1588230740/.tmp/info/83d9ef74fc4e4e6696a3e7d5d04c4652 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731504941944.1b0b3fe3cfd275f72fe8b5466cb336bf./info:regioninfo/1731504942307/Put/seqid=0 2024-11-13T13:36:07,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46417 is added to blk_1073741843_1027 (size=7125) 2024-11-13T13:36:07,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43769 is added to blk_1073741843_1027 (size=7125) 2024-11-13T13:36:07,070 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/hbase/meta/1588230740/.tmp/info/83d9ef74fc4e4e6696a3e7d5d04c4652 2024-11-13T13:36:07,096 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/hbase/meta/1588230740/.tmp/ns/820af51c6f5545499b186f7e7a25f862 is 43, key is default/ns:d/1731504941868/Put/seqid=0 2024-11-13T13:36:07,097 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:07,099 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:07,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43769 is added to blk_1073741844_1028 (size=5153) 2024-11-13T13:36:07,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46417 is added to blk_1073741844_1028 (size=5153) 2024-11-13T13:36:07,102 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/hbase/meta/1588230740/.tmp/ns/820af51c6f5545499b186f7e7a25f862 2024-11-13T13:36:07,124 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/hbase/meta/1588230740/.tmp/table/12fef123d69b42f09fa2165f102666af is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731504942317/Put/seqid=0 2024-11-13T13:36:07,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43769 is added to blk_1073741845_1029 (size=5438) 2024-11-13T13:36:07,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46417 is added to blk_1073741845_1029 (size=5438) 2024-11-13T13:36:07,133 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/hbase/meta/1588230740/.tmp/table/12fef123d69b42f09fa2165f102666af 2024-11-13T13:36:07,140 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/hbase/meta/1588230740/.tmp/info/83d9ef74fc4e4e6696a3e7d5d04c4652 as hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/hbase/meta/1588230740/info/83d9ef74fc4e4e6696a3e7d5d04c4652 2024-11-13T13:36:07,148 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/hbase/meta/1588230740/info/83d9ef74fc4e4e6696a3e7d5d04c4652, entries=10, sequenceid=11, filesize=7.0 K 2024-11-13T13:36:07,149 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/hbase/meta/1588230740/.tmp/ns/820af51c6f5545499b186f7e7a25f862 as hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/hbase/meta/1588230740/ns/820af51c6f5545499b186f7e7a25f862 2024-11-13T13:36:07,156 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/hbase/meta/1588230740/ns/820af51c6f5545499b186f7e7a25f862, 
entries=2, sequenceid=11, filesize=5.0 K 2024-11-13T13:36:07,157 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/hbase/meta/1588230740/.tmp/table/12fef123d69b42f09fa2165f102666af as hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/hbase/meta/1588230740/table/12fef123d69b42f09fa2165f102666af 2024-11-13T13:36:07,164 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/hbase/meta/1588230740/table/12fef123d69b42f09fa2165f102666af, entries=2, sequenceid=11, filesize=5.3 K 2024-11-13T13:36:07,165 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 127ms, sequenceid=11, compaction requested=false 2024-11-13T13:36:07,165 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-13T13:36:07,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T13:36:07,171 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-13T13:36:07,171 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T13:36:07,171 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:36:07,172 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:36:07,172 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T13:36:07,172 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-13T13:36:07,172 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=488243835, stopped=false 2024-11-13T13:36:07,172 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=bfeb2336aed7,36873,1731504940518 2024-11-13T13:36:07,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T13:36:07,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x1013468a1810001, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T13:36:07,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:36:07,230 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T13:36:07,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x1013468a1810001, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:36:07,231 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-13T13:36:07,231 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T13:36:07,231 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Set watcher on 
znode that does not yet exist, /hbase/running 2024-11-13T13:36:07,231 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:36:07,231 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45505-0x1013468a1810001, quorum=127.0.0.1:53902, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T13:36:07,232 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'bfeb2336aed7,45505,1731504940789' ***** 2024-11-13T13:36:07,232 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T13:36:07,232 INFO [RS:0;bfeb2336aed7:45505 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T13:36:07,232 INFO [RS:0;bfeb2336aed7:45505 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T13:36:07,232 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T13:36:07,232 INFO [RS:0;bfeb2336aed7:45505 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T13:36:07,232 INFO [RS:0;bfeb2336aed7:45505 {}] regionserver.HRegionServer(3091): Received CLOSE for 1b0b3fe3cfd275f72fe8b5466cb336bf 2024-11-13T13:36:07,233 INFO [RS:0;bfeb2336aed7:45505 {}] regionserver.HRegionServer(959): stopping server bfeb2336aed7,45505,1731504940789 2024-11-13T13:36:07,233 INFO [RS:0;bfeb2336aed7:45505 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T13:36:07,233 INFO [RS:0;bfeb2336aed7:45505 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;bfeb2336aed7:45505. 2024-11-13T13:36:07,233 DEBUG [RS:0;bfeb2336aed7:45505 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T13:36:07,233 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 1b0b3fe3cfd275f72fe8b5466cb336bf, disabling compactions & flushes 2024-11-13T13:36:07,233 DEBUG [RS:0;bfeb2336aed7:45505 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:36:07,233 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731504941944.1b0b3fe3cfd275f72fe8b5466cb336bf. 2024-11-13T13:36:07,233 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731504941944.1b0b3fe3cfd275f72fe8b5466cb336bf. 2024-11-13T13:36:07,233 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731504941944.1b0b3fe3cfd275f72fe8b5466cb336bf. after waiting 0 ms 2024-11-13T13:36:07,233 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731504941944.1b0b3fe3cfd275f72fe8b5466cb336bf. 2024-11-13T13:36:07,233 INFO [RS:0;bfeb2336aed7:45505 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T13:36:07,233 INFO [RS:0;bfeb2336aed7:45505 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T13:36:07,233 INFO [RS:0;bfeb2336aed7:45505 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-13T13:36:07,233 INFO [RS:0;bfeb2336aed7:45505 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T13:36:07,233 INFO [RS:0;bfeb2336aed7:45505 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-13T13:36:07,233 DEBUG [RS:0;bfeb2336aed7:45505 {}] regionserver.HRegionServer(1325): Online Regions={1b0b3fe3cfd275f72fe8b5466cb336bf=TestLogRolling-testLogRollOnPipelineRestart,,1731504941944.1b0b3fe3cfd275f72fe8b5466cb336bf., 1588230740=hbase:meta,,1.1588230740} 2024-11-13T13:36:07,233 DEBUG [RS:0;bfeb2336aed7:45505 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 1b0b3fe3cfd275f72fe8b5466cb336bf 2024-11-13T13:36:07,233 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T13:36:07,234 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T13:36:07,234 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T13:36:07,234 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T13:36:07,234 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T13:36:07,238 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/default/TestLogRolling-testLogRollOnPipelineRestart/1b0b3fe3cfd275f72fe8b5466cb336bf/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-13T13:36:07,239 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed 
TestLogRolling-testLogRollOnPipelineRestart,,1731504941944.1b0b3fe3cfd275f72fe8b5466cb336bf. 2024-11-13T13:36:07,239 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 1b0b3fe3cfd275f72fe8b5466cb336bf: Waiting for close lock at 1731504967233Running coprocessor pre-close hooks at 1731504967233Disabling compacts and flushes for region at 1731504967233Disabling writes for close at 1731504967233Writing region close event to WAL at 1731504967233Running coprocessor post-close hooks at 1731504967239 (+6 ms)Closed at 1731504967239 2024-11-13T13:36:07,239 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731504941944.1b0b3fe3cfd275f72fe8b5466cb336bf. 2024-11-13T13:36:07,240 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-13T13:36:07,240 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T13:36:07,240 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T13:36:07,240 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731504967233Running coprocessor pre-close hooks at 1731504967233Disabling compacts and flushes for region at 1731504967233Disabling writes for close at 1731504967234 (+1 ms)Writing region close event to WAL at 1731504967236 (+2 ms)Running coprocessor post-close hooks at 1731504967240 (+4 ms)Closed at 1731504967240 2024-11-13T13:36:07,241 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T13:36:07,285 INFO [regionserver/bfeb2336aed7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-13T13:36:07,285 INFO [regionserver/bfeb2336aed7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-13T13:36:07,285 INFO [regionserver/bfeb2336aed7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T13:36:07,434 INFO [RS:0;bfeb2336aed7:45505 {}] regionserver.HRegionServer(976): stopping server bfeb2336aed7,45505,1731504940789; all regions closed. 
2024-11-13T13:36:07,434 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:07,434 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:07,435 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:07,435 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:07,435 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:07,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43769 is added to blk_1073741842_1025 (size=825) 2024-11-13T13:36:07,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46417 is added to blk_1073741842_1025 (size=825) 2024-11-13T13:36:08,097 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:08,099 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:09,098 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:09,100 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:09,508 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-13T13:36:10,098 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:10,100 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:10,500 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-13T13:36:11,046 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.meta.1731504941786.meta after 4001ms 2024-11-13T13:36:11,047 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/WALs/bfeb2336aed7,45505,1731504940789/bfeb2336aed7%2C45505%2C1731504940789.meta.1731504941786.meta to hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/oldWALs/bfeb2336aed7%2C45505%2C1731504940789.meta.1731504941786.meta 2024-11-13T13:36:11,050 DEBUG [RS:0;bfeb2336aed7:45505 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/oldWALs 2024-11-13T13:36:11,050 INFO [RS:0;bfeb2336aed7:45505 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bfeb2336aed7%2C45505%2C1731504940789.meta:.meta(num 1731504967039) 2024-11-13T13:36:11,051 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:11,051 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:11,051 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:11,051 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:11,051 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:11,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43769 is added to blk_1073741840_1023 (size=1162) 2024-11-13T13:36:11,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46417 is added to blk_1073741840_1023 (size=1162) 2024-11-13T13:36:11,060 DEBUG [RS:0;bfeb2336aed7:45505 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/oldWALs 2024-11-13T13:36:11,060 INFO [RS:0;bfeb2336aed7:45505 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bfeb2336aed7%2C45505%2C1731504940789:(num 1731504966975) 2024-11-13T13:36:11,060 DEBUG [RS:0;bfeb2336aed7:45505 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:36:11,060 INFO [RS:0;bfeb2336aed7:45505 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T13:36:11,060 INFO [RS:0;bfeb2336aed7:45505 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T13:36:11,060 INFO [RS:0;bfeb2336aed7:45505 {}] hbase.ChoreService(370): Chore service for: regionserver/bfeb2336aed7:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-13T13:36:11,060 INFO [RS:0;bfeb2336aed7:45505 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T13:36:11,060 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-13T13:36:11,060 INFO [RS:0;bfeb2336aed7:45505 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45505 2024-11-13T13:36:11,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T13:36:11,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x1013468a1810001, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bfeb2336aed7,45505,1731504940789 2024-11-13T13:36:11,083 INFO [RS:0;bfeb2336aed7:45505 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T13:36:11,093 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bfeb2336aed7,45505,1731504940789] 2024-11-13T13:36:11,099 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:36:11,101 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:11,104 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/bfeb2336aed7,45505,1731504940789 already deleted, retry=false 2024-11-13T13:36:11,104 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; bfeb2336aed7,45505,1731504940789 expired; onlineServers=0 2024-11-13T13:36:11,104 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'bfeb2336aed7,36873,1731504940518' ***** 2024-11-13T13:36:11,104 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-13T13:36:11,104 INFO [M:0;bfeb2336aed7:36873 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T13:36:11,104 INFO [M:0;bfeb2336aed7:36873 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T13:36:11,104 DEBUG [M:0;bfeb2336aed7:36873 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-13T13:36:11,104 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-13T13:36:11,104 DEBUG [M:0;bfeb2336aed7:36873 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T13:36:11,104 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.large.0-1731504941158 {}] cleaner.HFileCleaner(306): Exit Thread[master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.large.0-1731504941158,5,FailOnTimeoutGroup] 2024-11-13T13:36:11,104 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.small.0-1731504941158 {}] cleaner.HFileCleaner(306): Exit Thread[master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.small.0-1731504941158,5,FailOnTimeoutGroup] 2024-11-13T13:36:11,105 INFO [M:0;bfeb2336aed7:36873 {}] hbase.ChoreService(370): Chore service for: master/bfeb2336aed7:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T13:36:11,105 INFO [M:0;bfeb2336aed7:36873 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T13:36:11,105 DEBUG [M:0;bfeb2336aed7:36873 {}] master.HMaster(1795): Stopping service threads 2024-11-13T13:36:11,105 INFO [M:0;bfeb2336aed7:36873 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T13:36:11,105 INFO [M:0;bfeb2336aed7:36873 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T13:36:11,105 INFO [M:0;bfeb2336aed7:36873 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T13:36:11,106 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-13T13:36:11,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T13:36:11,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:36:11,114 DEBUG [M:0;bfeb2336aed7:36873 {}] zookeeper.ZKUtil(347): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-13T13:36:11,114 WARN [M:0;bfeb2336aed7:36873 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-13T13:36:11,115 INFO [M:0;bfeb2336aed7:36873 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/.lastflushedseqids 2024-11-13T13:36:11,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46417 is added to blk_1073741846_1030 (size=111) 2024-11-13T13:36:11,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43769 is added to blk_1073741846_1030 (size=111) 2024-11-13T13:36:11,123 INFO [M:0;bfeb2336aed7:36873 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T13:36:11,124 INFO [M:0;bfeb2336aed7:36873 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T13:36:11,124 DEBUG [M:0;bfeb2336aed7:36873 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T13:36:11,124 INFO [M:0;bfeb2336aed7:36873 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:36:11,124 DEBUG [M:0;bfeb2336aed7:36873 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:36:11,124 DEBUG [M:0;bfeb2336aed7:36873 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T13:36:11,124 DEBUG [M:0;bfeb2336aed7:36873 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:36:11,124 INFO [M:0;bfeb2336aed7:36873 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.16 KB heapSize=29.13 KB 2024-11-13T13:36:11,124 ERROR [FSHLog-0-hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData-prefix:bfeb2336aed7,36873,1731504940518 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36967,DS-ab9a2161-65c5-434f-8fcb-2eae2752a222,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:36:11,124 WARN [FSHLog-0-hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData-prefix:bfeb2336aed7,36873,1731504940518 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36967,DS-ab9a2161-65c5-434f-8fcb-2eae2752a222,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:36:11,125 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog bfeb2336aed7%2C36873%2C1731504940518:(num 1731504940937) roll requested 2024-11-13T13:36:11,125 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C36873%2C1731504940518.1731504971125 2024-11-13T13:36:11,131 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:11,131 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:11,131 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:11,131 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:11,131 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:11,131 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/WALs/bfeb2336aed7,36873,1731504940518/bfeb2336aed7%2C36873%2C1731504940518.1731504940937 with entries=53, filesize=26.61 KB; new WAL /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/WALs/bfeb2336aed7,36873,1731504940518/bfeb2336aed7%2C36873%2C1731504940518.1731504971125 2024-11-13T13:36:11,132 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36967,DS-ab9a2161-65c5-434f-8fcb-2eae2752a222,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T13:36:11,132 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36967,DS-ab9a2161-65c5-434f-8fcb-2eae2752a222,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T13:36:11,132 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/WALs/bfeb2336aed7,36873,1731504940518/bfeb2336aed7%2C36873%2C1731504940518.1731504940937 2024-11-13T13:36:11,132 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37527:37527),(127.0.0.1/127.0.0.1:46861:46861)] 2024-11-13T13:36:11,132 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/WALs/bfeb2336aed7,36873,1731504940518/bfeb2336aed7%2C36873%2C1731504940518.1731504940937 is not closed yet, will try archiving it next time 2024-11-13T13:36:11,132 WARN [IPC Server handler 0 on default port 46467 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/WALs/bfeb2336aed7,36873,1731504940518/bfeb2336aed7%2C36873%2C1731504940518.1731504940937 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-11-13T13:36:11,132 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/WALs/bfeb2336aed7,36873,1731504940518/bfeb2336aed7%2C36873%2C1731504940518.1731504940937 after 0ms 2024-11-13T13:36:11,151 DEBUG [M:0;bfeb2336aed7:36873 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/36f44ade7944448ba5f028607e48690f is 82, key is hbase:meta,,1/info:regioninfo/1731504941813/Put/seqid=0 2024-11-13T13:36:11,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43769 is added to blk_1073741848_1033 (size=5672) 2024-11-13T13:36:11,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46417 is added to blk_1073741848_1033 (size=5672) 2024-11-13T13:36:11,157 INFO [M:0;bfeb2336aed7:36873 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/36f44ade7944448ba5f028607e48690f 2024-11-13T13:36:11,179 DEBUG [M:0;bfeb2336aed7:36873 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/eb6029179d9140ba96cea85fdc8095e0 is 777, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731504942321/Put/seqid=0 2024-11-13T13:36:11,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46417 is added to blk_1073741849_1034 (size=6117) 2024-11-13T13:36:11,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43769 is added to blk_1073741849_1034 (size=6117) 2024-11-13T13:36:11,184 INFO [M:0;bfeb2336aed7:36873 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.56 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/eb6029179d9140ba96cea85fdc8095e0 2024-11-13T13:36:11,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x1013468a1810001, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T13:36:11,193 INFO [RS:0;bfeb2336aed7:45505 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T13:36:11,193 INFO [RS:0;bfeb2336aed7:45505 {}] regionserver.HRegionServer(1031): Exiting; stopping=bfeb2336aed7,45505,1731504940789; zookeeper connection closed. 2024-11-13T13:36:11,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x1013468a1810001, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T13:36:11,194 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@d78ecab {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@d78ecab 2024-11-13T13:36:11,194 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-13T13:36:11,206 DEBUG [M:0;bfeb2336aed7:36873 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c82f04503319449a873e6a08392958a0 is 69, key is bfeb2336aed7,45505,1731504940789/rs:state/1731504941265/Put/seqid=0 2024-11-13T13:36:11,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46417 is added to blk_1073741850_1035 (size=5156) 2024-11-13T13:36:11,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43769 is added to blk_1073741850_1035 (size=5156) 2024-11-13T13:36:11,211 INFO [M:0;bfeb2336aed7:36873 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c82f04503319449a873e6a08392958a0 2024-11-13T13:36:11,229 DEBUG [M:0;bfeb2336aed7:36873 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/db52e61426894692837c03ce15b629b0 is 52, key is load_balancer_on/state:d/1731504941938/Put/seqid=0 2024-11-13T13:36:11,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46417 is added to blk_1073741851_1036 (size=5056) 2024-11-13T13:36:11,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43769 is added to blk_1073741851_1036 (size=5056) 2024-11-13T13:36:11,234 INFO [M:0;bfeb2336aed7:36873 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/db52e61426894692837c03ce15b629b0 2024-11-13T13:36:11,240 DEBUG [M:0;bfeb2336aed7:36873 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/36f44ade7944448ba5f028607e48690f as hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/36f44ade7944448ba5f028607e48690f 2024-11-13T13:36:11,245 INFO [M:0;bfeb2336aed7:36873 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/36f44ade7944448ba5f028607e48690f, entries=8, sequenceid=56, filesize=5.5 K 2024-11-13T13:36:11,246 DEBUG [M:0;bfeb2336aed7:36873 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/eb6029179d9140ba96cea85fdc8095e0 as hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/eb6029179d9140ba96cea85fdc8095e0 2024-11-13T13:36:11,252 INFO [M:0;bfeb2336aed7:36873 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/eb6029179d9140ba96cea85fdc8095e0, entries=6, sequenceid=56, filesize=6.0 K 2024-11-13T13:36:11,253 DEBUG [M:0;bfeb2336aed7:36873 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c82f04503319449a873e6a08392958a0 as hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c82f04503319449a873e6a08392958a0 2024-11-13T13:36:11,259 INFO [M:0;bfeb2336aed7:36873 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c82f04503319449a873e6a08392958a0, entries=1, sequenceid=56, filesize=5.0 K 2024-11-13T13:36:11,261 DEBUG [M:0;bfeb2336aed7:36873 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/db52e61426894692837c03ce15b629b0 as hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/db52e61426894692837c03ce15b629b0 2024-11-13T13:36:11,266 INFO [M:0;bfeb2336aed7:36873 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/db52e61426894692837c03ce15b629b0, entries=1, sequenceid=56, filesize=4.9 K 2024-11-13T13:36:11,267 INFO [M:0;bfeb2336aed7:36873 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 143ms, sequenceid=56, compaction requested=false 2024-11-13T13:36:11,268 INFO [M:0;bfeb2336aed7:36873 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T13:36:11,268 DEBUG [M:0;bfeb2336aed7:36873 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731504971124Disabling compacts and flushes for region at 1731504971124Disabling writes for close at 1731504971124Obtaining lock to block concurrent updates at 1731504971124Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731504971124Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23714, getHeapSize=29768, getOffHeapSize=0, getCellsCount=67 at 1731504971124Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731504971133 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731504971133Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731504971151 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731504971151Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731504971162 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731504971179 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731504971179Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731504971188 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731504971205 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731504971205Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731504971215 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731504971229 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731504971229Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@25d37d15: reopening flushed file at 1731504971239 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4066a5f: reopening flushed file at 1731504971245 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@562ac8b0: reopening flushed file at 1731504971252 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@743560fd: reopening flushed file at 1731504971260 (+8 ms)Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 143ms, sequenceid=56, compaction requested=false at 1731504971267 (+7 ms)Writing region close event to WAL at 1731504971268 (+1 ms)Closed at 1731504971268 2024-11-13T13:36:11,268 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:11,268 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:11,269 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:11,269 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:11,269 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:11,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46417 is added to blk_1073741847_1031 (size=757) 2024-11-13T13:36:11,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43769 is added to blk_1073741847_1031 (size=757) 2024-11-13T13:36:11,948 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: 
RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T13:36:11,948 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T13:36:11,949 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-13T13:36:11,949 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-13T13:36:12,100 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:12,101 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:12,239 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:12,240 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:12,260 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:12,261 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:12,261 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:12,261 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:12,261 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:12,261 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:12,265 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:12,265 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:12,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:12,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:12,271 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:12,271 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:12,509 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-13T13:36:12,774 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T13:36:12,775 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:12,775 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:12,776 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:12,776 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:12,803 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:12,803 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:12,803 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:12,803 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:12,804 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:12,804 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:12,807 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:12,808 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:12,808 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:12,810 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:13,100 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:13,102 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:36:14,101 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:14,102 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:15,102 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:36:15,103 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:36:15,133 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/WALs/bfeb2336aed7,36873,1731504940518/bfeb2336aed7%2C36873%2C1731504940518.1731504940937 after 4001ms 2024-11-13T13:36:15,134 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/WALs/bfeb2336aed7,36873,1731504940518/bfeb2336aed7%2C36873%2C1731504940518.1731504940937 to hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/oldWALs/bfeb2336aed7%2C36873%2C1731504940518.1731504940937 2024-11-13T13:36:15,136 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/MasterData/oldWALs/bfeb2336aed7%2C36873%2C1731504940518.1731504940937 to hdfs://localhost:46467/user/jenkins/test-data/02132352-6242-17a1-f9d1-08ae046913f2/oldWALs/bfeb2336aed7%2C36873%2C1731504940518.1731504940937$masterlocalwal$ 2024-11-13T13:36:15,136 INFO [M:0;bfeb2336aed7:36873 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-13T13:36:15,136 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T13:36:15,136 INFO [M:0;bfeb2336aed7:36873 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36873 2024-11-13T13:36:15,136 INFO [M:0;bfeb2336aed7:36873 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T13:36:15,309 INFO [M:0;bfeb2336aed7:36873 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T13:36:15,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T13:36:15,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36873-0x1013468a1810000, quorum=127.0.0.1:53902, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T13:36:15,343 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@41b9791e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:36:15,343 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1240a3cc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T13:36:15,343 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T13:36:15,343 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f3e5a16{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T13:36:15,344 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c39138a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/hadoop.log.dir/,STOPPED} 2024-11-13T13:36:15,345 WARN [BP-1348924368-172.17.0.2-1731504937923 heartbeating to localhost/127.0.0.1:46467 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager 
interrupted 2024-11-13T13:36:15,345 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-13T13:36:15,345 WARN [BP-1348924368-172.17.0.2-1731504937923 heartbeating to localhost/127.0.0.1:46467 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1348924368-172.17.0.2-1731504937923 (Datanode Uuid 3a717171-4bca-4788-8f6a-d577ec78ab7f) service to localhost/127.0.0.1:46467 2024-11-13T13:36:15,345 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T13:36:15,346 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/cluster_7d13f452-3b8c-c473-f020-215ab3c51fcd/data/data3/current/BP-1348924368-172.17.0.2-1731504937923 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:36:15,347 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/cluster_7d13f452-3b8c-c473-f020-215ab3c51fcd/data/data4/current/BP-1348924368-172.17.0.2-1731504937923 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:36:15,347 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T13:36:15,348 WARN [BP-1348924368-172.17.0.2-1731504937923 heartbeating to localhost/127.0.0.1:46467 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1348924368-172.17.0.2-1731504937923 (Datanode Uuid 0cea774c-18a7-48aa-896e-30dea8c1d062) service to localhost/127.0.0.1:46467 2024-11-13T13:36:15,348 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/cluster_7d13f452-3b8c-c473-f020-215ab3c51fcd/data/data1/current/BP-1348924368-172.17.0.2-1731504937923 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:36:15,349 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/cluster_7d13f452-3b8c-c473-f020-215ab3c51fcd/data/data2/current/BP-1348924368-172.17.0.2-1731504937923 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:36:15,350 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6db938{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:36:15,350 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5cf288ef{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T13:36:15,350 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T13:36:15,350 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@ef50a45{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T13:36:15,351 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19016e01{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/hadoop.log.dir/,STOPPED} 2024-11-13T13:36:15,352 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T13:36:15,358 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@49ef22be{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T13:36:15,358 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1b230242{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T13:36:15,358 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T13:36:15,359 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7096145a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T13:36:15,359 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25a29a07{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/hadoop.log.dir/,STOPPED} 2024-11-13T13:36:15,366 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-13T13:36:15,386 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-13T13:36:15,394 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=184 (was 158) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46467 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46467 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46467 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46467 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46467 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46467 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:46467 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46467 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 448) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=239 (was 215) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=3444 (was 4002) 2024-11-13T13:36:15,402 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=184, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=239, ProcessCount=11, AvailableMemoryMB=3444 2024-11-13T13:36:15,402 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-13T13:36:15,402 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/hadoop.log.dir so I do NOT create it in target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1 2024-11-13T13:36:15,402 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bfad055-190e-7fc9-ff21-38a64540517f/hadoop.tmp.dir so I do NOT create it in target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1 2024-11-13T13:36:15,402 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/cluster_3d9d16a9-7805-2f9f-6cf6-09135c2106e7, deleteOnExit=true 2024-11-13T13:36:15,402 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-13T13:36:15,403 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/test.cache.data in system properties and HBase conf 2024-11-13T13:36:15,403 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T13:36:15,403 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/hadoop.log.dir in system properties and HBase conf 2024-11-13T13:36:15,403 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T13:36:15,403 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T13:36:15,403 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T13:36:15,403 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-13T13:36:15,404 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T13:36:15,404 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T13:36:15,404 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T13:36:15,404 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T13:36:15,404 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T13:36:15,404 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T13:36:15,404 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T13:36:15,404 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T13:36:15,404 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-13T13:36:15,404 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/nfs.dump.dir in system properties and HBase conf 2024-11-13T13:36:15,405 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/java.io.tmpdir in system properties and HBase conf 2024-11-13T13:36:15,405 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T13:36:15,405 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T13:36:15,405 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T13:36:15,421 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T13:36:15,855 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:36:15,861 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T13:36:15,869 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T13:36:15,869 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T13:36:15,869 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T13:36:15,873 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:36:15,876 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1d6dee42{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/hadoop.log.dir/,AVAILABLE} 2024-11-13T13:36:15,877 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1106c0e7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T13:36:15,995 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@677a249b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/java.io.tmpdir/jetty-localhost-45383-hadoop-hdfs-3_4_1-tests_jar-_-any-15638859944766710957/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T13:36:15,995 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@67c2b9b{HTTP/1.1, (http/1.1)}{localhost:45383} 2024-11-13T13:36:15,995 INFO [Time-limited test {}] server.Server(415): Started @190683ms 2024-11-13T13:36:16,010 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T13:36:16,103 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:16,103 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:16,422 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:36:16,425 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T13:36:16,427 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T13:36:16,427 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T13:36:16,427 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T13:36:16,428 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4e873b68{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/hadoop.log.dir/,AVAILABLE} 2024-11-13T13:36:16,428 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5b135886{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T13:36:16,545 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@375c379c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/java.io.tmpdir/jetty-localhost-37173-hadoop-hdfs-3_4_1-tests_jar-_-any-27658116058913409/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:36:16,546 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3d40a54d{HTTP/1.1, (http/1.1)}{localhost:37173} 2024-11-13T13:36:16,546 INFO [Time-limited test {}] server.Server(415): Started @191233ms 2024-11-13T13:36:16,547 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T13:36:16,595 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:36:16,598 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T13:36:16,599 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T13:36:16,599 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T13:36:16,599 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T13:36:16,600 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30873421{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/hadoop.log.dir/,AVAILABLE} 2024-11-13T13:36:16,601 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74e6f5d9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T13:36:16,718 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@67b4013d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/java.io.tmpdir/jetty-localhost-35333-hadoop-hdfs-3_4_1-tests_jar-_-any-18360430442125015960/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:36:16,719 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1c429e05{HTTP/1.1, (http/1.1)}{localhost:35333} 2024-11-13T13:36:16,719 INFO [Time-limited test {}] server.Server(415): Started @191406ms 2024-11-13T13:36:16,719 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T13:36:17,103 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:17,104 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:36:17,853 WARN [Thread-1651 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/cluster_3d9d16a9-7805-2f9f-6cf6-09135c2106e7/data/data1/current/BP-755367798-172.17.0.2-1731504975433/current, will proceed with Du for space computation calculation, 2024-11-13T13:36:17,853 WARN [Thread-1652 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/cluster_3d9d16a9-7805-2f9f-6cf6-09135c2106e7/data/data2/current/BP-755367798-172.17.0.2-1731504975433/current, will proceed with Du for space computation calculation, 2024-11-13T13:36:17,885 WARN [Thread-1615 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T13:36:17,888 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x596d4dd57cb371df with lease ID 0x6a33c916ec59701c: Processing first storage report for DS-37883efa-c444-4adc-82a9-52c5af97234b from datanode DatanodeRegistration(127.0.0.1:36763, datanodeUuid=72c88717-ed1e-4154-8577-25817a6a2bb1, infoPort=43813, infoSecurePort=0, ipcPort=35343, storageInfo=lv=-57;cid=testClusterID;nsid=1043758622;c=1731504975433) 2024-11-13T13:36:17,888 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x596d4dd57cb371df with lease ID 0x6a33c916ec59701c: from storage DS-37883efa-c444-4adc-82a9-52c5af97234b node DatanodeRegistration(127.0.0.1:36763, datanodeUuid=72c88717-ed1e-4154-8577-25817a6a2bb1, infoPort=43813, infoSecurePort=0, ipcPort=35343, storageInfo=lv=-57;cid=testClusterID;nsid=1043758622;c=1731504975433), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:36:17,888 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x596d4dd57cb371df with lease ID 0x6a33c916ec59701c: Processing first storage report for DS-424f931b-7f32-4225-85ca-fdb2d2cbd693 from datanode DatanodeRegistration(127.0.0.1:36763, datanodeUuid=72c88717-ed1e-4154-8577-25817a6a2bb1, infoPort=43813, infoSecurePort=0, ipcPort=35343, storageInfo=lv=-57;cid=testClusterID;nsid=1043758622;c=1731504975433) 2024-11-13T13:36:17,888 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x596d4dd57cb371df with lease ID 0x6a33c916ec59701c: from storage DS-424f931b-7f32-4225-85ca-fdb2d2cbd693 node DatanodeRegistration(127.0.0.1:36763, datanodeUuid=72c88717-ed1e-4154-8577-25817a6a2bb1, infoPort=43813, infoSecurePort=0, ipcPort=35343, storageInfo=lv=-57;cid=testClusterID;nsid=1043758622;c=1731504975433), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:36:17,938 WARN [Thread-1662 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/cluster_3d9d16a9-7805-2f9f-6cf6-09135c2106e7/data/data3/current/BP-755367798-172.17.0.2-1731504975433/current, will proceed with Du for space computation calculation, 2024-11-13T13:36:17,939 WARN [Thread-1663 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/cluster_3d9d16a9-7805-2f9f-6cf6-09135c2106e7/data/data4/current/BP-755367798-172.17.0.2-1731504975433/current, will proceed with Du for space computation calculation, 2024-11-13T13:36:17,965 WARN [Thread-1638 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T13:36:17,967 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x460d0009a3c1e80c with lease ID 0x6a33c916ec59701d: Processing first storage report for DS-25d678b0-667f-40cd-b432-f5f0c8aa89f1 from datanode DatanodeRegistration(127.0.0.1:40401, datanodeUuid=2f5a8021-f972-4bd3-8a64-3cd5e38e7238, infoPort=38119, infoSecurePort=0, ipcPort=43323, storageInfo=lv=-57;cid=testClusterID;nsid=1043758622;c=1731504975433) 2024-11-13T13:36:17,967 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x460d0009a3c1e80c with lease ID 0x6a33c916ec59701d: from storage DS-25d678b0-667f-40cd-b432-f5f0c8aa89f1 node DatanodeRegistration(127.0.0.1:40401, datanodeUuid=2f5a8021-f972-4bd3-8a64-3cd5e38e7238, infoPort=38119, infoSecurePort=0, ipcPort=43323, storageInfo=lv=-57;cid=testClusterID;nsid=1043758622;c=1731504975433), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-13T13:36:17,967 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x460d0009a3c1e80c with lease ID 0x6a33c916ec59701d: Processing first storage report for DS-d66edec1-82f2-4cd8-b5a7-7a0415d3f4b7 from datanode DatanodeRegistration(127.0.0.1:40401, datanodeUuid=2f5a8021-f972-4bd3-8a64-3cd5e38e7238, infoPort=38119, infoSecurePort=0, ipcPort=43323, storageInfo=lv=-57;cid=testClusterID;nsid=1043758622;c=1731504975433) 2024-11-13T13:36:17,967 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x460d0009a3c1e80c with lease ID 0x6a33c916ec59701d: from storage DS-d66edec1-82f2-4cd8-b5a7-7a0415d3f4b7 node DatanodeRegistration(127.0.0.1:40401, datanodeUuid=2f5a8021-f972-4bd3-8a64-3cd5e38e7238, infoPort=38119, infoSecurePort=0, ipcPort=43323, storageInfo=lv=-57;cid=testClusterID;nsid=1043758622;c=1731504975433), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:36:18,052 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1 2024-11-13T13:36:18,056 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/cluster_3d9d16a9-7805-2f9f-6cf6-09135c2106e7/zookeeper_0, clientPort=51252, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/cluster_3d9d16a9-7805-2f9f-6cf6-09135c2106e7/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/cluster_3d9d16a9-7805-2f9f-6cf6-09135c2106e7/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T13:36:18,057 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51252 2024-11-13T13:36:18,057 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:36:18,059 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:36:18,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40401 is added to blk_1073741825_1001 (size=7) 2024-11-13T13:36:18,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36763 is added to blk_1073741825_1001 (size=7) 2024-11-13T13:36:18,072 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3 with version=8 2024-11-13T13:36:18,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/hbase-staging 2024-11-13T13:36:18,075 INFO [Time-limited test {}] client.ConnectionUtils(128): master/bfeb2336aed7:0 server-side Connection retries=45 2024-11-13T13:36:18,075 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T13:36:18,075 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T13:36:18,075 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T13:36:18,075 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T13:36:18,075 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T13:36:18,075 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T13:36:18,075 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T13:36:18,078 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38899 2024-11-13T13:36:18,079 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38899 connecting to ZooKeeper ensemble=127.0.0.1:51252 2024-11-13T13:36:18,104 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:18,104 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:18,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:388990x0, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T13:36:18,139 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38899-0x101346934310000 connected 2024-11-13T13:36:18,219 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:36:18,221 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:36:18,222 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T13:36:18,223 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3, hbase.cluster.distributed=false 2024-11-13T13:36:18,224 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T13:36:18,225 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38899 2024-11-13T13:36:18,225 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38899 2024-11-13T13:36:18,225 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38899 2024-11-13T13:36:18,225 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38899 2024-11-13T13:36:18,226 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38899 2024-11-13T13:36:18,242 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/bfeb2336aed7:0 server-side Connection retries=45 2024-11-13T13:36:18,243 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T13:36:18,243 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T13:36:18,243 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T13:36:18,243 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T13:36:18,243 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T13:36:18,243 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T13:36:18,243 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T13:36:18,244 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44923 2024-11-13T13:36:18,246 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44923 connecting to ZooKeeper ensemble=127.0.0.1:51252 2024-11-13T13:36:18,246 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:36:18,248 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:36:18,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:449230x0, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T13:36:18,262 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:449230x0, quorum=127.0.0.1:51252, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T13:36:18,262 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44923-0x101346934310001 connected 2024-11-13T13:36:18,262 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T13:36:18,264 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T13:36:18,265 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44923-0x101346934310001, quorum=127.0.0.1:51252, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T13:36:18,266 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44923-0x101346934310001, quorum=127.0.0.1:51252, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T13:36:18,273 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44923 2024-11-13T13:36:18,274 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44923 2024-11-13T13:36:18,274 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44923 2024-11-13T13:36:18,275 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44923 2024-11-13T13:36:18,275 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44923 2024-11-13T13:36:18,288 DEBUG [M:0;bfeb2336aed7:38899 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;bfeb2336aed7:38899 2024-11-13T13:36:18,288 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/bfeb2336aed7,38899,1731504978074 2024-11-13T13:36:18,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44923-0x101346934310001, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T13:36:18,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T13:36:18,301 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/bfeb2336aed7,38899,1731504978074 2024-11-13T13:36:18,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:36:18,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44923-0x101346934310001, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T13:36:18,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44923-0x101346934310001, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:36:18,314 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T13:36:18,315 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/bfeb2336aed7,38899,1731504978074 from backup master directory 2024-11-13T13:36:18,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44923-0x101346934310001, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T13:36:18,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/bfeb2336aed7,38899,1731504978074 2024-11-13T13:36:18,324 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T13:36:18,324 WARN [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T13:36:18,324 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=bfeb2336aed7,38899,1731504978074 2024-11-13T13:36:18,328 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/hbase.id] with ID: c7c29116-8504-42b3-9f4e-06b380e1e5d8 2024-11-13T13:36:18,328 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/.tmp/hbase.id 2024-11-13T13:36:18,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40401 is added to blk_1073741826_1002 (size=42) 2024-11-13T13:36:18,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36763 is added to blk_1073741826_1002 (size=42) 2024-11-13T13:36:18,337 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/.tmp/hbase.id]:[hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/hbase.id] 2024-11-13T13:36:18,349 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:36:18,350 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-13T13:36:18,351 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-13T13:36:18,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44923-0x101346934310001, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:36:18,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:36:18,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36763 is added to blk_1073741827_1003 (size=196) 2024-11-13T13:36:18,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40401 is added to blk_1073741827_1003 (size=196) 2024-11-13T13:36:18,386 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T13:36:18,387 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T13:36:18,388 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T13:36:18,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36763 is added to blk_1073741828_1004 (size=1189) 2024-11-13T13:36:18,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40401 is added to blk_1073741828_1004 (size=1189) 2024-11-13T13:36:18,407 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/MasterData/data/master/store 2024-11-13T13:36:18,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36763 is added to blk_1073741829_1005 (size=34) 2024-11-13T13:36:18,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40401 is added to blk_1073741829_1005 (size=34) 2024-11-13T13:36:18,416 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:36:18,416 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T13:36:18,416 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:36:18,416 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:36:18,416 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T13:36:18,416 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:36:18,416 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T13:36:18,416 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731504978416Disabling compacts and flushes for region at 1731504978416Disabling writes for close at 1731504978416Writing region close event to WAL at 1731504978416Closed at 1731504978416 2024-11-13T13:36:18,417 WARN [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/MasterData/data/master/store/.initializing 2024-11-13T13:36:18,417 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/MasterData/WALs/bfeb2336aed7,38899,1731504978074 2024-11-13T13:36:18,420 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bfeb2336aed7%2C38899%2C1731504978074, suffix=, logDir=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/MasterData/WALs/bfeb2336aed7,38899,1731504978074, archiveDir=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/MasterData/oldWALs, maxLogs=10 2024-11-13T13:36:18,421 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C38899%2C1731504978074.1731504978420 2024-11-13T13:36:18,426 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/MasterData/WALs/bfeb2336aed7,38899,1731504978074/bfeb2336aed7%2C38899%2C1731504978074.1731504978420 2024-11-13T13:36:18,427 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38119:38119),(127.0.0.1/127.0.0.1:43813:43813)] 2024-11-13T13:36:18,427 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T13:36:18,428 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:36:18,428 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:36:18,428 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:36:18,429 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:36:18,431 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T13:36:18,431 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:36:18,431 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:36:18,431 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:36:18,432 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T13:36:18,432 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:36:18,433 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T13:36:18,433 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:36:18,434 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T13:36:18,434 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:36:18,435 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T13:36:18,435 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:36:18,436 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T13:36:18,436 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:36:18,437 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T13:36:18,437 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:36:18,437 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:36:18,438 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:36:18,439 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:36:18,439 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:36:18,440 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T13:36:18,441 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:36:18,444 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T13:36:18,444 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=744666, jitterRate=-0.05310831964015961}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T13:36:18,445 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731504978428Initializing all the Stores at 1731504978429 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504978429Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731504978429Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731504978429Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731504978429Cleaning up temporary data from old regions at 1731504978439 (+10 ms)Region opened successfully at 1731504978445 (+6 ms) 2024-11-13T13:36:18,445 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T13:36:18,448 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bff22a1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bfeb2336aed7/172.17.0.2:0 2024-11-13T13:36:18,449 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-13T13:36:18,450 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T13:36:18,450 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T13:36:18,450 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T13:36:18,450 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-13T13:36:18,451 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-13T13:36:18,451 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T13:36:18,453 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-13T13:36:18,454 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T13:36:18,461 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T13:36:18,462 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T13:36:18,462 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T13:36:18,471 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T13:36:18,472 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T13:36:18,475 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T13:36:18,482 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T13:36:18,483 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T13:36:18,492 DEBUG 
[master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T13:36:18,495 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T13:36:18,503 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T13:36:18,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44923-0x101346934310001, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T13:36:18,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T13:36:18,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44923-0x101346934310001, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:36:18,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:36:18,514 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=bfeb2336aed7,38899,1731504978074, sessionid=0x101346934310000, setting cluster-up flag (Was=false) 2024-11-13T13:36:18,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44923-0x101346934310001, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:36:18,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:36:18,566 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T13:36:18,567 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bfeb2336aed7,38899,1731504978074 2024-11-13T13:36:18,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:36:18,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44923-0x101346934310001, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:36:18,624 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T13:36:18,626 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bfeb2336aed7,38899,1731504978074 2024-11-13T13:36:18,629 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T13:36:18,630 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T13:36:18,631 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T13:36:18,631 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-13T13:36:18,631 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: bfeb2336aed7,38899,1731504978074 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T13:36:18,634 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/bfeb2336aed7:0, corePoolSize=5, maxPoolSize=5 2024-11-13T13:36:18,634 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/bfeb2336aed7:0, corePoolSize=5, maxPoolSize=5 2024-11-13T13:36:18,634 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=5, maxPoolSize=5 2024-11-13T13:36:18,634 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=5, maxPoolSize=5 2024-11-13T13:36:18,634 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/bfeb2336aed7:0, corePoolSize=10, maxPoolSize=10 2024-11-13T13:36:18,634 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:36:18,634 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=2, maxPoolSize=2 2024-11-13T13:36:18,634 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=1, 
maxPoolSize=1 2024-11-13T13:36:18,635 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731505008634 2024-11-13T13:36:18,635 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T13:36:18,635 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T13:36:18,635 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T13:36:18,635 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T13:36:18,635 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T13:36:18,635 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T13:36:18,635 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T13:36:18,635 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T13:36:18,635 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T13:36:18,635 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T13:36:18,636 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T13:36:18,636 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T13:36:18,636 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T13:36:18,636 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T13:36:18,636 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:36:18,637 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T13:36:18,637 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.large.0-1731504978636,5,FailOnTimeoutGroup] 2024-11-13T13:36:18,639 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.small.0-1731504978637,5,FailOnTimeoutGroup] 2024-11-13T13:36:18,640 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T13:36:18,640 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-13T13:36:18,640 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T13:36:18,640 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-13T13:36:18,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36763 is added to blk_1073741831_1007 (size=1321) 2024-11-13T13:36:18,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40401 is added to blk_1073741831_1007 (size=1321) 2024-11-13T13:36:18,649 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T13:36:18,649 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3 2024-11-13T13:36:18,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36763 is added to blk_1073741832_1008 (size=32) 2024-11-13T13:36:18,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40401 is added to blk_1073741832_1008 (size=32) 2024-11-13T13:36:18,661 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:36:18,662 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T13:36:18,664 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T13:36:18,664 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:36:18,665 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:36:18,665 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T13:36:18,666 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T13:36:18,667 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:36:18,667 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:36:18,667 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T13:36:18,669 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T13:36:18,669 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:36:18,669 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:36:18,670 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T13:36:18,671 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T13:36:18,671 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:36:18,672 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:36:18,672 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T13:36:18,673 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/hbase/meta/1588230740 2024-11-13T13:36:18,674 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/hbase/meta/1588230740 2024-11-13T13:36:18,675 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T13:36:18,675 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T13:36:18,676 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-13T13:36:18,677 INFO [RS:0;bfeb2336aed7:44923 {}] regionserver.HRegionServer(746): ClusterId : c7c29116-8504-42b3-9f4e-06b380e1e5d8 2024-11-13T13:36:18,677 DEBUG [RS:0;bfeb2336aed7:44923 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T13:36:18,677 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T13:36:18,679 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T13:36:18,680 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=779558, jitterRate=-0.008741125464439392}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T13:36:18,680 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731504978661Initializing all the Stores at 1731504978662 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504978662Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504978662Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731504978662Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504978662Cleaning up temporary data from old regions at 1731504978675 (+13 ms)Region opened successfully at 1731504978680 (+5 ms) 2024-11-13T13:36:18,681 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T13:36:18,681 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T13:36:18,681 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T13:36:18,681 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T13:36:18,681 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T13:36:18,681 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed 
hbase:meta,,1.1588230740 2024-11-13T13:36:18,681 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731504978681Disabling compacts and flushes for region at 1731504978681Disabling writes for close at 1731504978681Writing region close event to WAL at 1731504978681Closed at 1731504978681 2024-11-13T13:36:18,682 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T13:36:18,682 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T13:36:18,683 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T13:36:18,683 DEBUG [RS:0;bfeb2336aed7:44923 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T13:36:18,683 DEBUG [RS:0;bfeb2336aed7:44923 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T13:36:18,684 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T13:36:18,685 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T13:36:18,694 DEBUG [RS:0;bfeb2336aed7:44923 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T13:36:18,694 DEBUG [RS:0;bfeb2336aed7:44923 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@161aea72, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bfeb2336aed7/172.17.0.2:0 2024-11-13T13:36:18,706 DEBUG [RS:0;bfeb2336aed7:44923 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;bfeb2336aed7:44923 2024-11-13T13:36:18,706 INFO [RS:0;bfeb2336aed7:44923 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T13:36:18,706 INFO [RS:0;bfeb2336aed7:44923 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T13:36:18,706 DEBUG [RS:0;bfeb2336aed7:44923 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-13T13:36:18,707 INFO [RS:0;bfeb2336aed7:44923 {}] regionserver.HRegionServer(2659): reportForDuty to master=bfeb2336aed7,38899,1731504978074 with port=44923, startcode=1731504978242 2024-11-13T13:36:18,708 DEBUG [RS:0;bfeb2336aed7:44923 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T13:36:18,710 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50887, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T13:36:18,710 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38899 {}] master.ServerManager(363): Checking decommissioned status of RegionServer bfeb2336aed7,44923,1731504978242 2024-11-13T13:36:18,710 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38899 {}] master.ServerManager(517): Registering regionserver=bfeb2336aed7,44923,1731504978242 2024-11-13T13:36:18,712 DEBUG [RS:0;bfeb2336aed7:44923 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3 2024-11-13T13:36:18,712 DEBUG [RS:0;bfeb2336aed7:44923 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36201 2024-11-13T13:36:18,712 DEBUG [RS:0;bfeb2336aed7:44923 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T13:36:18,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T13:36:18,725 DEBUG [RS:0;bfeb2336aed7:44923 {}] zookeeper.ZKUtil(111): regionserver:44923-0x101346934310001, quorum=127.0.0.1:51252, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bfeb2336aed7,44923,1731504978242 2024-11-13T13:36:18,725 WARN [RS:0;bfeb2336aed7:44923 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T13:36:18,725 INFO [RS:0;bfeb2336aed7:44923 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T13:36:18,725 DEBUG [RS:0;bfeb2336aed7:44923 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/WALs/bfeb2336aed7,44923,1731504978242 2024-11-13T13:36:18,726 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bfeb2336aed7,44923,1731504978242] 2024-11-13T13:36:18,729 INFO [RS:0;bfeb2336aed7:44923 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T13:36:18,730 INFO [RS:0;bfeb2336aed7:44923 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T13:36:18,730 INFO [RS:0;bfeb2336aed7:44923 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T13:36:18,730 INFO [RS:0;bfeb2336aed7:44923 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-13T13:36:18,730 INFO [RS:0;bfeb2336aed7:44923 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T13:36:18,731 INFO [RS:0;bfeb2336aed7:44923 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T13:36:18,731 INFO [RS:0;bfeb2336aed7:44923 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T13:36:18,731 DEBUG [RS:0;bfeb2336aed7:44923 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:36:18,731 DEBUG [RS:0;bfeb2336aed7:44923 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:36:18,731 DEBUG [RS:0;bfeb2336aed7:44923 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:36:18,731 DEBUG [RS:0;bfeb2336aed7:44923 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:36:18,731 DEBUG [RS:0;bfeb2336aed7:44923 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:36:18,731 DEBUG [RS:0;bfeb2336aed7:44923 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bfeb2336aed7:0, corePoolSize=2, maxPoolSize=2 2024-11-13T13:36:18,732 DEBUG [RS:0;bfeb2336aed7:44923 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:36:18,732 DEBUG [RS:0;bfeb2336aed7:44923 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:36:18,732 DEBUG [RS:0;bfeb2336aed7:44923 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:36:18,732 DEBUG [RS:0;bfeb2336aed7:44923 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:36:18,732 DEBUG [RS:0;bfeb2336aed7:44923 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:36:18,732 DEBUG [RS:0;bfeb2336aed7:44923 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:36:18,732 DEBUG [RS:0;bfeb2336aed7:44923 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bfeb2336aed7:0, corePoolSize=3, maxPoolSize=3 2024-11-13T13:36:18,732 DEBUG [RS:0;bfeb2336aed7:44923 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0, corePoolSize=3, maxPoolSize=3 2024-11-13T13:36:18,732 INFO [RS:0;bfeb2336aed7:44923 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-13T13:36:18,732 INFO [RS:0;bfeb2336aed7:44923 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T13:36:18,733 INFO [RS:0;bfeb2336aed7:44923 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T13:36:18,733 INFO [RS:0;bfeb2336aed7:44923 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T13:36:18,733 INFO [RS:0;bfeb2336aed7:44923 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T13:36:18,733 INFO [RS:0;bfeb2336aed7:44923 {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,44923,1731504978242-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T13:36:18,749 INFO [RS:0;bfeb2336aed7:44923 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T13:36:18,749 INFO [RS:0;bfeb2336aed7:44923 {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,44923,1731504978242-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T13:36:18,749 INFO [RS:0;bfeb2336aed7:44923 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:36:18,749 INFO [RS:0;bfeb2336aed7:44923 {}] regionserver.Replication(171): bfeb2336aed7,44923,1731504978242 started 2024-11-13T13:36:18,766 INFO [RS:0;bfeb2336aed7:44923 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:36:18,766 INFO [RS:0;bfeb2336aed7:44923 {}] regionserver.HRegionServer(1482): Serving as bfeb2336aed7,44923,1731504978242, RpcServer on bfeb2336aed7/172.17.0.2:44923, sessionid=0x101346934310001 2024-11-13T13:36:18,766 DEBUG [RS:0;bfeb2336aed7:44923 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T13:36:18,766 DEBUG [RS:0;bfeb2336aed7:44923 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bfeb2336aed7,44923,1731504978242 2024-11-13T13:36:18,766 DEBUG [RS:0;bfeb2336aed7:44923 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bfeb2336aed7,44923,1731504978242' 2024-11-13T13:36:18,766 DEBUG [RS:0;bfeb2336aed7:44923 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T13:36:18,767 DEBUG [RS:0;bfeb2336aed7:44923 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T13:36:18,767 DEBUG [RS:0;bfeb2336aed7:44923 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T13:36:18,767 DEBUG [RS:0;bfeb2336aed7:44923 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T13:36:18,768 DEBUG [RS:0;bfeb2336aed7:44923 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager bfeb2336aed7,44923,1731504978242 2024-11-13T13:36:18,768 DEBUG [RS:0;bfeb2336aed7:44923 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bfeb2336aed7,44923,1731504978242' 2024-11-13T13:36:18,768 DEBUG [RS:0;bfeb2336aed7:44923 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T13:36:18,768 DEBUG 
[RS:0;bfeb2336aed7:44923 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T13:36:18,768 DEBUG [RS:0;bfeb2336aed7:44923 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T13:36:18,768 INFO [RS:0;bfeb2336aed7:44923 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T13:36:18,768 INFO [RS:0;bfeb2336aed7:44923 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-13T13:36:18,836 WARN [bfeb2336aed7:38899 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-13T13:36:18,870 INFO [RS:0;bfeb2336aed7:44923 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bfeb2336aed7%2C44923%2C1731504978242, suffix=, logDir=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/WALs/bfeb2336aed7,44923,1731504978242, archiveDir=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/oldWALs, maxLogs=32 2024-11-13T13:36:18,871 INFO [RS:0;bfeb2336aed7:44923 {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C44923%2C1731504978242.1731504978871 2024-11-13T13:36:18,890 INFO [RS:0;bfeb2336aed7:44923 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/WALs/bfeb2336aed7,44923,1731504978242/bfeb2336aed7%2C44923%2C1731504978242.1731504978871 2024-11-13T13:36:18,891 DEBUG [RS:0;bfeb2336aed7:44923 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38119:38119),(127.0.0.1/127.0.0.1:43813:43813)] 2024-11-13T13:36:19,086 DEBUG [bfeb2336aed7:38899 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-13T13:36:19,087 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=bfeb2336aed7,44923,1731504978242 2024-11-13T13:36:19,088 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bfeb2336aed7,44923,1731504978242, state=OPENING 2024-11-13T13:36:19,105 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:19,105 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:36:19,135 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T13:36:19,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:36:19,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44923-0x101346934310001, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:36:19,146 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T13:36:19,146 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T13:36:19,146 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T13:36:19,146 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=bfeb2336aed7,44923,1731504978242}] 2024-11-13T13:36:19,300 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T13:36:19,303 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41457, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T13:36:19,308 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T13:36:19,309 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T13:36:19,311 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bfeb2336aed7%2C44923%2C1731504978242.meta, suffix=.meta, logDir=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/WALs/bfeb2336aed7,44923,1731504978242, archiveDir=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/oldWALs, maxLogs=32 2024-11-13T13:36:19,312 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C44923%2C1731504978242.meta.1731504979312.meta 2024-11-13T13:36:19,319 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/WALs/bfeb2336aed7,44923,1731504978242/bfeb2336aed7%2C44923%2C1731504978242.meta.1731504979312.meta 2024-11-13T13:36:19,322 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43813:43813),(127.0.0.1/127.0.0.1:38119:38119)] 
2024-11-13T13:36:19,326 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T13:36:19,327 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T13:36:19,327 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T13:36:19,327 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-13T13:36:19,327 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T13:36:19,327 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:36:19,327 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T13:36:19,327 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T13:36:19,329 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T13:36:19,330 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T13:36:19,330 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:36:19,331 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:36:19,331 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, 
cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T13:36:19,331 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T13:36:19,331 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:36:19,332 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:36:19,332 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T13:36:19,332 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T13:36:19,332 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:36:19,333 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:36:19,333 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T13:36:19,333 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); 
ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T13:36:19,333 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:36:19,334 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:36:19,334 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T13:36:19,334 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/hbase/meta/1588230740 2024-11-13T13:36:19,335 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/hbase/meta/1588230740 2024-11-13T13:36:19,336 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T13:36:19,336 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T13:36:19,337 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-13T13:36:19,338 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T13:36:19,339 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=851056, jitterRate=0.08217430114746094}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T13:36:19,339 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T13:36:19,339 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731504979327Writing region info on filesystem at 1731504979328 (+1 ms)Initializing all the Stores at 1731504979329 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504979329Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504979329Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731504979329Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731504979329Cleaning up temporary data from old regions at 1731504979336 (+7 ms)Running coprocessor post-open hooks at 1731504979339 (+3 ms)Region opened successfully at 1731504979339 2024-11-13T13:36:19,340 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731504979300 2024-11-13T13:36:19,342 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T13:36:19,342 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T13:36:19,343 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=bfeb2336aed7,44923,1731504978242 2024-11-13T13:36:19,344 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bfeb2336aed7,44923,1731504978242, state=OPEN 2024-11-13T13:36:19,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44923-0x101346934310001, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T13:36:19,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T13:36:19,382 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=bfeb2336aed7,44923,1731504978242 2024-11-13T13:36:19,383 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T13:36:19,383 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T13:36:19,389 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T13:36:19,389 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=bfeb2336aed7,44923,1731504978242 in 237 msec 2024-11-13T13:36:19,396 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-13T13:36:19,396 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 707 msec 2024-11-13T13:36:19,397 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T13:36:19,397 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T13:36:19,399 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T13:36:19,399 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bfeb2336aed7,44923,1731504978242, seqNum=-1] 2024-11-13T13:36:19,399 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T13:36:19,401 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57669, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T13:36:19,407 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 776 msec 2024-11-13T13:36:19,407 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731504979407, completionTime=-1 2024-11-13T13:36:19,407 INFO 
[master/bfeb2336aed7:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-13T13:36:19,407 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-13T13:36:19,409 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-13T13:36:19,409 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731505039409 2024-11-13T13:36:19,409 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731505099409 2024-11-13T13:36:19,409 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-13T13:36:19,409 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,38899,1731504978074-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T13:36:19,409 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,38899,1731504978074-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:36:19,409 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,38899,1731504978074-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:36:19,409 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-bfeb2336aed7:38899, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:36:19,409 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-13T13:36:19,410 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T13:36:19,411 DEBUG [master/bfeb2336aed7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T13:36:19,413 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.089sec 2024-11-13T13:36:19,413 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T13:36:19,413 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T13:36:19,413 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-13T13:36:19,413 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-13T13:36:19,413 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T13:36:19,413 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,38899,1731504978074-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T13:36:19,413 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,38899,1731504978074-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T13:36:19,415 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T13:36:19,415 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T13:36:19,416 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,38899,1731504978074-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:36:19,478 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33b8310b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T13:36:19,478 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request bfeb2336aed7,38899,-1 for getting cluster id 2024-11-13T13:36:19,478 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T13:36:19,483 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7c29116-8504-42b3-9f4e-06b380e1e5d8' 2024-11-13T13:36:19,484 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T13:36:19,484 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7c29116-8504-42b3-9f4e-06b380e1e5d8" 2024-11-13T13:36:19,485 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1799ecdc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T13:36:19,485 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [bfeb2336aed7,38899,-1] 2024-11-13T13:36:19,486 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T13:36:19,486 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:36:19,488 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36770, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T13:36:19,489 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4301c0a6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T13:36:19,490 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T13:36:19,491 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bfeb2336aed7,44923,1731504978242, seqNum=-1] 2024-11-13T13:36:19,491 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T13:36:19,492 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57944, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T13:36:19,494 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=bfeb2336aed7,38899,1731504978074 2024-11-13T13:36:19,494 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:36:19,497 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-13T13:36:19,497 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-13T13:36:19,498 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is bfeb2336aed7,38899,1731504978074 2024-11-13T13:36:19,498 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@9c84136 2024-11-13T13:36:19,498 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-13T13:36:19,499 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36772, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-13T13:36:19,500 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38899 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-13T13:36:19,500 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38899 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-13T13:36:19,500 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38899 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T13:36:19,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38899 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-13T13:36:19,503 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T13:36:19,504 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:36:19,504 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38899 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-13T13:36:19,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38899 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T13:36:19,505 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T13:36:19,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36763 is added to blk_1073741835_1011 (size=405) 2024-11-13T13:36:19,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40401 is added to blk_1073741835_1011 (size=405) 2024-11-13T13:36:19,525 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1fe21c21f4f4b84edaae05b018016164, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3 2024-11-13T13:36:19,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40401 is added to blk_1073741836_1012 (size=88) 2024-11-13T13:36:19,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36763 is added to blk_1073741836_1012 (size=88) 2024-11-13T13:36:19,533 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:36:19,533 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 1fe21c21f4f4b84edaae05b018016164, disabling compactions & flushes 2024-11-13T13:36:19,533 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164. 2024-11-13T13:36:19,533 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164. 2024-11-13T13:36:19,533 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164. after waiting 0 ms 2024-11-13T13:36:19,533 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164. 2024-11-13T13:36:19,533 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164. 2024-11-13T13:36:19,533 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1fe21c21f4f4b84edaae05b018016164: Waiting for close lock at 1731504979533Disabling compacts and flushes for region at 1731504979533Disabling writes for close at 1731504979533Writing region close event to WAL at 1731504979533Closed at 1731504979533 2024-11-13T13:36:19,536 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T13:36:19,536 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731504979536"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731504979536"}]},"ts":"1731504979536"} 2024-11-13T13:36:19,538 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-13T13:36:19,540 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T13:36:19,540 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731504979540"}]},"ts":"1731504979540"} 2024-11-13T13:36:19,543 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-13T13:36:19,543 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=1fe21c21f4f4b84edaae05b018016164, ASSIGN}] 2024-11-13T13:36:19,544 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=1fe21c21f4f4b84edaae05b018016164, ASSIGN 2024-11-13T13:36:19,546 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=1fe21c21f4f4b84edaae05b018016164, ASSIGN; state=OFFLINE, location=bfeb2336aed7,44923,1731504978242; forceNewPlan=false, retain=false 2024-11-13T13:36:19,697 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1fe21c21f4f4b84edaae05b018016164, regionState=OPENING, regionLocation=bfeb2336aed7,44923,1731504978242 2024-11-13T13:36:19,702 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=1fe21c21f4f4b84edaae05b018016164, ASSIGN because future has completed 2024-11-13T13:36:19,703 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1fe21c21f4f4b84edaae05b018016164, server=bfeb2336aed7,44923,1731504978242}] 2024-11-13T13:36:19,869 INFO [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164. 
2024-11-13T13:36:19,869 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 1fe21c21f4f4b84edaae05b018016164, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164.', STARTKEY => '', ENDKEY => ''} 2024-11-13T13:36:19,869 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 1fe21c21f4f4b84edaae05b018016164 2024-11-13T13:36:19,870 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:36:19,870 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 1fe21c21f4f4b84edaae05b018016164 2024-11-13T13:36:19,870 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 1fe21c21f4f4b84edaae05b018016164 2024-11-13T13:36:19,871 INFO [StoreOpener-1fe21c21f4f4b84edaae05b018016164-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1fe21c21f4f4b84edaae05b018016164 2024-11-13T13:36:19,873 INFO [StoreOpener-1fe21c21f4f4b84edaae05b018016164-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1fe21c21f4f4b84edaae05b018016164 columnFamilyName info 2024-11-13T13:36:19,873 DEBUG [StoreOpener-1fe21c21f4f4b84edaae05b018016164-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:36:19,873 INFO [StoreOpener-1fe21c21f4f4b84edaae05b018016164-1 {}] regionserver.HStore(327): Store=1fe21c21f4f4b84edaae05b018016164/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T13:36:19,873 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 1fe21c21f4f4b84edaae05b018016164 2024-11-13T13:36:19,874 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164 2024-11-13T13:36:19,874 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164 2024-11-13T13:36:19,875 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 1fe21c21f4f4b84edaae05b018016164 2024-11-13T13:36:19,875 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 1fe21c21f4f4b84edaae05b018016164 2024-11-13T13:36:19,877 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 1fe21c21f4f4b84edaae05b018016164 2024-11-13T13:36:19,879 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T13:36:19,879 INFO [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 1fe21c21f4f4b84edaae05b018016164; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=864636, jitterRate=0.09944188594818115}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T13:36:19,879 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1fe21c21f4f4b84edaae05b018016164 2024-11-13T13:36:19,880 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 1fe21c21f4f4b84edaae05b018016164: Running coprocessor pre-open hook at 1731504979870Writing region info on filesystem at 1731504979870Initializing all the Stores at 1731504979871 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731504979871Cleaning up temporary data from old regions at 1731504979875 (+4 ms)Running coprocessor post-open hooks at 1731504979879 (+4 ms)Region opened successfully at 1731504979880 (+1 ms) 2024-11-13T13:36:19,881 INFO [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164., pid=6, masterSystemTime=1731504979857 2024-11-13T13:36:19,883 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open 
deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164. 2024-11-13T13:36:19,883 INFO [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164. 2024-11-13T13:36:19,884 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1fe21c21f4f4b84edaae05b018016164, regionState=OPEN, openSeqNum=2, regionLocation=bfeb2336aed7,44923,1731504978242 2024-11-13T13:36:19,887 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1fe21c21f4f4b84edaae05b018016164, server=bfeb2336aed7,44923,1731504978242 because future has completed 2024-11-13T13:36:19,892 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-13T13:36:19,892 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 1fe21c21f4f4b84edaae05b018016164, server=bfeb2336aed7,44923,1731504978242 in 186 msec 2024-11-13T13:36:19,896 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-13T13:36:19,896 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=1fe21c21f4f4b84edaae05b018016164, ASSIGN in 349 msec 2024-11-13T13:36:19,897 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T13:36:19,898 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731504979897"}]},"ts":"1731504979897"} 2024-11-13T13:36:19,900 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-13T13:36:19,902 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T13:36:19,904 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 402 msec 2024-11-13T13:36:20,105 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:20,105 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:21,106 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:21,106 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:21,948 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T13:36:21,948 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-13T13:36:21,948 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T13:36:21,949 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-13T13:36:21,949 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-13T13:36:21,949 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-13T13:36:21,949 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-13T13:36:21,949 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-13T13:36:22,107 WARN 
[Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:22,107 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:23,107 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:36:23,107 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:24,108 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:24,108 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:24,328 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,328 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,328 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,328 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,329 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,329 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,348 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,349 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,349 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,349 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,349 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,349 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,354 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,354 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,354 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,356 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,860 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T13:36:24,862 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,862 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,863 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,863 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,864 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,864 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,895 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,895 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,896 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,898 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:36:24,903 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-13T13:36:24,904 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-13T13:36:25,109 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:25,109 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:26,109 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:26,110 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:27,110 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:27,110 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:28,111 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:28,111 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:29,112 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:29,112 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:36:29,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38899 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T13:36:29,522 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-13T13:36:29,522 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-13T13:36:29,527 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-13T13:36:29,527 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164. 2024-11-13T13:36:29,531 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164., hostname=bfeb2336aed7,44923,1731504978242, seqNum=2] 2024-11-13T13:36:29,540 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38899 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-13T13:36:29,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38899 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-13T13:36:29,548 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-13T13:36:29,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38899 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-13T13:36:29,550 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-13T13:36:29,551 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-13T13:36:29,715 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44923 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-13T13:36:29,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164. 
2024-11-13T13:36:29,716 INFO [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 1fe21c21f4f4b84edaae05b018016164 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-13T13:36:29,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/.tmp/info/1811b464faa54d3cb14c6e6a337eeedf is 1080, key is row0001/info:/1731504989532/Put/seqid=0 2024-11-13T13:36:29,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40401 is added to blk_1073741837_1013 (size=6033) 2024-11-13T13:36:29,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36763 is added to blk_1073741837_1013 (size=6033) 2024-11-13T13:36:29,737 INFO [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/.tmp/info/1811b464faa54d3cb14c6e6a337eeedf 2024-11-13T13:36:29,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/.tmp/info/1811b464faa54d3cb14c6e6a337eeedf as hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/info/1811b464faa54d3cb14c6e6a337eeedf 2024-11-13T13:36:29,751 INFO [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/info/1811b464faa54d3cb14c6e6a337eeedf, entries=1, sequenceid=5, filesize=5.9 K 2024-11-13T13:36:29,752 INFO [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 1fe21c21f4f4b84edaae05b018016164 in 36ms, sequenceid=5, compaction requested=false 2024-11-13T13:36:29,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 1fe21c21f4f4b84edaae05b018016164: 2024-11-13T13:36:29,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164. 
2024-11-13T13:36:29,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-13T13:36:29,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38899 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-13T13:36:29,760 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-13T13:36:29,760 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 206 msec 2024-11-13T13:36:29,762 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 219 msec 2024-11-13T13:36:30,113 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:36:30,113 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:31,114 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:31,114 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:36:32,114 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:32,114 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:33,115 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:36:33,115 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:34,115 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:34,115 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:36:35,116 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:35,116 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:36,117 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:36:36,117 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:37,118 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:37,118 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:36:38,118 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:38,118 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:39,119 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:36:39,119 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:39,120 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta after 68047ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T13:36:39,120 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 after 68062ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T13:36:39,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38899 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-13T13:36:39,592 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-13T13:36:39,595 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38899 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-13T13:36:39,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38899 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-13T13:36:39,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38899 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-13T13:36:39,598 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-13T13:36:39,599 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-13T13:36:39,599 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-13T13:36:39,753 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44923 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-13T13:36:39,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164. 
2024-11-13T13:36:39,754 INFO [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 1fe21c21f4f4b84edaae05b018016164 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-13T13:36:39,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/.tmp/info/7a05ca8ba18c447391c557a27f8de3f7 is 1080, key is row0002/info:/1731504999594/Put/seqid=0 2024-11-13T13:36:39,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40401 is added to blk_1073741838_1014 (size=6033) 2024-11-13T13:36:39,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36763 is added to blk_1073741838_1014 (size=6033) 2024-11-13T13:36:39,766 INFO [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/.tmp/info/7a05ca8ba18c447391c557a27f8de3f7 2024-11-13T13:36:39,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/.tmp/info/7a05ca8ba18c447391c557a27f8de3f7 as hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/info/7a05ca8ba18c447391c557a27f8de3f7 2024-11-13T13:36:39,780 INFO [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/info/7a05ca8ba18c447391c557a27f8de3f7, entries=1, sequenceid=9, filesize=5.9 K 2024-11-13T13:36:39,782 INFO [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 1fe21c21f4f4b84edaae05b018016164 in 29ms, sequenceid=9, compaction requested=false 2024-11-13T13:36:39,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 1fe21c21f4f4b84edaae05b018016164: 2024-11-13T13:36:39,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164. 
2024-11-13T13:36:39,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-13T13:36:39,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38899 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-13T13:36:39,786 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-13T13:36:39,786 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 185 msec 2024-11-13T13:36:39,789 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 191 msec 2024-11-13T13:36:40,121 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:36:40,121 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:41,121 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:41,121 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:36:42,122 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:42,122 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:43,123 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:36:43,123 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:44,123 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:44,123 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:36:45,124 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:45,124 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:46,125 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:36:46,125 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:47,125 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:47,126 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:36:48,051 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T13:36:48,126 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:48,126 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:49,127 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:49,127 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:36:49,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38899 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-13T13:36:49,602 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-13T13:36:49,606 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C44923%2C1731504978242.1731505009606 2024-11-13T13:36:49,613 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:49,613 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:49,613 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:49,613 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:49,614 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:36:49,614 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/WALs/bfeb2336aed7,44923,1731504978242/bfeb2336aed7%2C44923%2C1731504978242.1731504978871 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/WALs/bfeb2336aed7,44923,1731504978242/bfeb2336aed7%2C44923%2C1731504978242.1731505009606 2024-11-13T13:36:49,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36763 is added to blk_1073741833_1009 (size=5546) 2024-11-13T13:36:49,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40401 is added to blk_1073741833_1009 (size=5546) 2024-11-13T13:36:49,620 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38119:38119),(127.0.0.1/127.0.0.1:43813:43813)] 2024-11-13T13:36:49,621 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38899 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-13T13:36:49,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38899 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-13T13:36:49,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38899 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-13T13:36:49,624 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-13T13:36:49,625 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-13T13:36:49,625 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-13T13:36:49,779 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44923 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-13T13:36:49,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164. 2024-11-13T13:36:49,780 INFO [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 1fe21c21f4f4b84edaae05b018016164 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-13T13:36:49,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/.tmp/info/52b4a37cc2d845348d96cca8d5ce2df3 is 1080, key is row0003/info:/1731505009604/Put/seqid=0 2024-11-13T13:36:49,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36763 is added to blk_1073741840_1016 (size=6033) 2024-11-13T13:36:49,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40401 is added to blk_1073741840_1016 (size=6033) 2024-11-13T13:36:49,793 INFO [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/.tmp/info/52b4a37cc2d845348d96cca8d5ce2df3 2024-11-13T13:36:49,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/.tmp/info/52b4a37cc2d845348d96cca8d5ce2df3 as hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/info/52b4a37cc2d845348d96cca8d5ce2df3 2024-11-13T13:36:49,807 INFO [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/info/52b4a37cc2d845348d96cca8d5ce2df3, entries=1, sequenceid=13, filesize=5.9 K 2024-11-13T13:36:49,808 INFO [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 1fe21c21f4f4b84edaae05b018016164 in 28ms, sequenceid=13, compaction requested=true 2024-11-13T13:36:49,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 
1fe21c21f4f4b84edaae05b018016164: 2024-11-13T13:36:49,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164. 2024-11-13T13:36:49,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-13T13:36:49,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38899 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-13T13:36:49,812 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-13T13:36:49,812 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 184 msec 2024-11-13T13:36:49,814 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 192 msec 2024-11-13T13:36:50,128 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:50,128 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:51,129 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:51,129 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:52,129 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:52,129 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:53,130 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:53,130 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:54,131 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:54,131 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:55,132 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:55,132 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:56,133 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:56,133 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:57,133 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:57,133 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:58,134 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:58,134 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:59,135 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:59,135 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:36:59,416 INFO [master/bfeb2336aed7:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-13T13:36:59,416 INFO [master/bfeb2336aed7:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
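The repeated WARN entries above come from the lease-recovery retry in RecoverLeaseFSUtils: roughly once per second the Close-WAL-Writer-0 thread probes isFileClosed on the two old WAL files under hdfs://localhost:40141, and every probe fails with java.io.IOException: Filesystem closed because the DFSClient behind that filesystem has already been shut down, so the reflective call can never report the file as closed. Below is a minimal sketch of that probe pattern, assuming a DistributedFileSystem handle and a hypothetical waitUntilClosed helper; it is an illustration of what the traces show, not the actual RecoverLeaseFSUtils code.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class IsFileClosedProbe {

  // Hypothetical helper for illustration only; not the RecoverLeaseFSUtils implementation.
  static boolean waitUntilClosed(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws InterruptedException {
    final Method isFileClosed;
    try {
      // isFileClosed is looked up reflectively, presumably so older hdfs clients
      // that lack the method can still be supported.
      isFileClosed = DistributedFileSystem.class.getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false; // method not available in this client version
    }
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        if ((Boolean) isFileClosed.invoke(dfs, wal)) {
          return true; // lease recovery finished, the file is closed
        }
      } catch (IllegalAccessException | InvocationTargetException e) {
        // With an already-closed DFSClient the cause is IOException("Filesystem closed"),
        // which is what the "Failed invocation for ..." WARN lines above keep reporting.
      }
      Thread.sleep(1000L); // retry about once per second, matching the timestamps above
    }
    return false;
  }
}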
2024-11-13T13:36:59,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38899 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-13T13:36:59,732 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-13T13:36:59,732 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-13T13:36:59,734 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-13T13:36:59,734 DEBUG [Time-limited test {}] regionserver.HStore(1541): 1fe21c21f4f4b84edaae05b018016164/info is initiating minor compaction (all files)
2024-11-13T13:36:59,734 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-13T13:36:59,734 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-13T13:36:59,735 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 1fe21c21f4f4b84edaae05b018016164/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164.
2024-11-13T13:36:59,735 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/info/1811b464faa54d3cb14c6e6a337eeedf, hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/info/7a05ca8ba18c447391c557a27f8de3f7, hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/info/52b4a37cc2d845348d96cca8d5ce2df3] into tmpdir=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/.tmp, totalSize=17.7 K
2024-11-13T13:36:59,736 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 1811b464faa54d3cb14c6e6a337eeedf, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731504989532
2024-11-13T13:36:59,736 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 7a05ca8ba18c447391c557a27f8de3f7, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731504999594
2024-11-13T13:36:59,737 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 52b4a37cc2d845348d96cca8d5ce2df3, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731505009604
2024-11-13T13:36:59,760 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 1fe21c21f4f4b84edaae05b018016164#info#compaction#45 average throughput is 3.08 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-13T13:36:59,761 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/.tmp/info/43f38550cca946a19f2d38b284de152c is 1080, key is row0001/info:/1731504989532/Put/seqid=0
2024-11-13T13:36:59,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36763 is added to blk_1073741841_1017 (size=8296)
2024-11-13T13:36:59,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40401 is added to blk_1073741841_1017 (size=8296)
2024-11-13T13:36:59,773 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/.tmp/info/43f38550cca946a19f2d38b284de152c as hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/info/43f38550cca946a19f2d38b284de152c
2024-11-13T13:36:59,780 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 1fe21c21f4f4b84edaae05b018016164/info of 1fe21c21f4f4b84edaae05b018016164 into 43f38550cca946a19f2d38b284de152c(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-13T13:36:59,781 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 1fe21c21f4f4b84edaae05b018016164:
2024-11-13T13:36:59,784 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C44923%2C1731504978242.1731505019783
2024-11-13T13:36:59,789 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T13:36:59,790 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T13:36:59,790 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T13:36:59,790 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T13:36:59,790 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T13:36:59,790 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/WALs/bfeb2336aed7,44923,1731504978242/bfeb2336aed7%2C44923%2C1731504978242.1731505009606 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/WALs/bfeb2336aed7,44923,1731504978242/bfeb2336aed7%2C44923%2C1731504978242.1731505019783
2024-11-13T13:36:59,791 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43813:43813),(127.0.0.1/127.0.0.1:38119:38119)]
2024-11-13T13:36:59,791 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/WALs/bfeb2336aed7,44923,1731504978242/bfeb2336aed7%2C44923%2C1731504978242.1731505009606 is not closed yet, will try archiving it next time
2024-11-13T13:36:59,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36763 is added to blk_1073741839_1015 (size=2520)
2024-11-13T13:36:59,792 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/WALs/bfeb2336aed7,44923,1731504978242/bfeb2336aed7%2C44923%2C1731504978242.1731504978871 to hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/oldWALs/bfeb2336aed7%2C44923%2C1731504978242.1731504978871
2024-11-13T13:36:59,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40401 is added to blk_1073741839_1015 (size=2520)
2024-11-13T13:36:59,792 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38899 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-13T13:36:59,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38899 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-13T13:36:59,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38899 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-13T13:36:59,795 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-13T13:36:59,796 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-13T13:36:59,796 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-13T13:36:59,949 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44923 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-13T13:36:59,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164.
2024-11-13T13:36:59,949 INFO [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 1fe21c21f4f4b84edaae05b018016164 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-13T13:36:59,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/.tmp/info/1c8459ea140f4a85bd3df3cce4419b95 is 1080, key is row0000/info:/1731505019782/Put/seqid=0
2024-11-13T13:36:59,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36763 is added to blk_1073741843_1019 (size=6033)
2024-11-13T13:36:59,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40401 is added to blk_1073741843_1019 (size=6033)
2024-11-13T13:36:59,959 INFO [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/.tmp/info/1c8459ea140f4a85bd3df3cce4419b95
2024-11-13T13:36:59,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/.tmp/info/1c8459ea140f4a85bd3df3cce4419b95 as hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/info/1c8459ea140f4a85bd3df3cce4419b95
2024-11-13T13:36:59,972 INFO [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/info/1c8459ea140f4a85bd3df3cce4419b95, entries=1, sequenceid=18, filesize=5.9 K
2024-11-13T13:36:59,973 INFO [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 1fe21c21f4f4b84edaae05b018016164 in 24ms, sequenceid=18, compaction requested=false
2024-11-13T13:36:59,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 1fe21c21f4f4b84edaae05b018016164:
2024-11-13T13:36:59,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164.
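The sequence above shows a minor compaction of three ~5.9 K store files into 43f38550cca946a19f2d38b284de152c (8.1 K), a WAL roll, and then a flush of TestLogRolling-testCompactionRecordDoesntBlockRolling requested by the jenkins client and run by the master as FlushTableProcedure pid=13 with one FlushRegionProcedure (pid=14). A minimal client-side sketch of issuing such a flush and compaction through the public Admin API follows, assuming default connection settings; the test itself goes through the async admin (RawAsyncHBaseAdmin), so this blocking variant is only an approximation of what triggered the log entries.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushAndCompactExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
    TableName table = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Flush all regions of the table; the master runs this as a FlushTableProcedure
      // with one FlushRegionProcedure per region, as in the procedure log above.
      admin.flush(table);
      // Request a compaction of the table's stores; the request is queued and the
      // region server performs the compaction asynchronously.
      admin.compact(table);
    }
  }
}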
2024-11-13T13:36:59,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-13T13:36:59,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38899 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-13T13:36:59,977 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-13T13:36:59,977 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 179 msec 2024-11-13T13:36:59,980 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 186 msec 2024-11-13T13:37:00,136 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:37:00,136 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:01,136 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:01,136 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:37:02,137 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:02,137 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:03,138 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:37:03,138 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:04,138 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:04,138 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:37:04,870 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1fe21c21f4f4b84edaae05b018016164, had cached 0 bytes from a total of 14329 2024-11-13T13:37:05,139 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:05,139 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:06,140 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:06,140 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:07,141 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:07,141 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:08,141 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:08,141 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:09,142 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:09,142 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:37:09,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38899 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-13T13:37:09,812 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-13T13:37:09,816 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C44923%2C1731504978242.1731505029816 2024-11-13T13:37:09,825 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:37:09,825 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:37:09,825 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:37:09,825 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:37:09,825 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:37:09,825 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/WALs/bfeb2336aed7,44923,1731504978242/bfeb2336aed7%2C44923%2C1731504978242.1731505019783 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/WALs/bfeb2336aed7,44923,1731504978242/bfeb2336aed7%2C44923%2C1731504978242.1731505029816 2024-11-13T13:37:09,826 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43813:43813),(127.0.0.1/127.0.0.1:38119:38119)] 2024-11-13T13:37:09,826 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/WALs/bfeb2336aed7,44923,1731504978242/bfeb2336aed7%2C44923%2C1731504978242.1731505019783 is not closed yet, will try archiving it next time 2024-11-13T13:37:09,826 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/WALs/bfeb2336aed7,44923,1731504978242/bfeb2336aed7%2C44923%2C1731504978242.1731505009606 to hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/oldWALs/bfeb2336aed7%2C44923%2C1731504978242.1731505009606 2024-11-13T13:37:09,826 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T13:37:09,826 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-13T13:37:09,827 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T13:37:09,827 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:37:09,827 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:37:09,827 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-13T13:37:09,827 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-13T13:37:09,827 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1696646448, stopped=false 2024-11-13T13:37:09,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40401 is added to blk_1073741842_1018 (size=2026) 2024-11-13T13:37:09,827 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=bfeb2336aed7,38899,1731504978074 2024-11-13T13:37:09,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36763 is added to blk_1073741842_1018 (size=2026) 2024-11-13T13:37:09,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T13:37:09,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44923-0x101346934310001, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T13:37:09,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44923-0x101346934310001, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:37:09,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:37:09,868 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T13:37:09,868 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-13T13:37:09,869 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T13:37:09,869 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:37:09,869 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:44923-0x101346934310001, quorum=127.0.0.1:51252, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T13:37:09,869 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T13:37:09,869 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'bfeb2336aed7,44923,1731504978242' ***** 2024-11-13T13:37:09,869 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T13:37:09,870 INFO [RS:0;bfeb2336aed7:44923 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T13:37:09,870 INFO [RS:0;bfeb2336aed7:44923 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T13:37:09,870 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T13:37:09,870 INFO [RS:0;bfeb2336aed7:44923 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T13:37:09,870 INFO [RS:0;bfeb2336aed7:44923 {}] regionserver.HRegionServer(3091): Received CLOSE for 1fe21c21f4f4b84edaae05b018016164 2024-11-13T13:37:09,870 INFO [RS:0;bfeb2336aed7:44923 {}] regionserver.HRegionServer(959): stopping server bfeb2336aed7,44923,1731504978242 2024-11-13T13:37:09,870 INFO [RS:0;bfeb2336aed7:44923 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T13:37:09,871 INFO [RS:0;bfeb2336aed7:44923 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;bfeb2336aed7:44923. 2024-11-13T13:37:09,871 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 1fe21c21f4f4b84edaae05b018016164, disabling compactions & flushes 2024-11-13T13:37:09,871 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164. 
2024-11-13T13:37:09,871 DEBUG [RS:0;bfeb2336aed7:44923 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T13:37:09,871 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164. 2024-11-13T13:37:09,871 DEBUG [RS:0;bfeb2336aed7:44923 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:37:09,871 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164. after waiting 0 ms 2024-11-13T13:37:09,871 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164. 2024-11-13T13:37:09,871 INFO [RS:0;bfeb2336aed7:44923 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T13:37:09,871 INFO [RS:0;bfeb2336aed7:44923 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T13:37:09,871 INFO [RS:0;bfeb2336aed7:44923 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-13T13:37:09,871 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 1fe21c21f4f4b84edaae05b018016164 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-13T13:37:09,871 INFO [RS:0;bfeb2336aed7:44923 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T13:37:09,872 INFO [RS:0;bfeb2336aed7:44923 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-13T13:37:09,872 DEBUG [RS:0;bfeb2336aed7:44923 {}] regionserver.HRegionServer(1325): Online Regions={1fe21c21f4f4b84edaae05b018016164=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164., 1588230740=hbase:meta,,1.1588230740} 2024-11-13T13:37:09,872 DEBUG [RS:0;bfeb2336aed7:44923 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 1fe21c21f4f4b84edaae05b018016164 2024-11-13T13:37:09,872 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T13:37:09,873 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T13:37:09,873 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T13:37:09,873 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T13:37:09,873 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T13:37:09,873 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-13T13:37:09,878 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/.tmp/info/563375e1e8cd4aad97ac9d2898d8c411 is 1080, key is row0001/info:/1731505029814/Put/seqid=0 2024-11-13T13:37:09,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40401 is added to blk_1073741845_1021 (size=6033) 2024-11-13T13:37:09,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36763 is added to blk_1073741845_1021 (size=6033) 2024-11-13T13:37:09,883 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/.tmp/info/563375e1e8cd4aad97ac9d2898d8c411 2024-11-13T13:37:09,891 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/.tmp/info/563375e1e8cd4aad97ac9d2898d8c411 as hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/info/563375e1e8cd4aad97ac9d2898d8c411 2024-11-13T13:37:09,892 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/hbase/meta/1588230740/.tmp/info/fd41f580256547cb8f43ff89f680ca16 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164./info:regioninfo/1731504979884/Put/seqid=0 2024-11-13T13:37:09,897 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/info/563375e1e8cd4aad97ac9d2898d8c411, entries=1, sequenceid=22, filesize=5.9 K 2024-11-13T13:37:09,898 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 1fe21c21f4f4b84edaae05b018016164 in 27ms, sequenceid=22, compaction requested=true 2024-11-13T13:37:09,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36763 is added to blk_1073741846_1022 (size=7308) 2024-11-13T13:37:09,898 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/info/1811b464faa54d3cb14c6e6a337eeedf, hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/info/7a05ca8ba18c447391c557a27f8de3f7, hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/info/52b4a37cc2d845348d96cca8d5ce2df3] to archive 2024-11-13T13:37:09,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40401 is added to blk_1073741846_1022 (size=7308) 2024-11-13T13:37:09,899 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/hbase/meta/1588230740/.tmp/info/fd41f580256547cb8f43ff89f680ca16 2024-11-13T13:37:09,899 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-13T13:37:09,901 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/info/1811b464faa54d3cb14c6e6a337eeedf to hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/info/1811b464faa54d3cb14c6e6a337eeedf 2024-11-13T13:37:09,902 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/info/7a05ca8ba18c447391c557a27f8de3f7 to hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/info/7a05ca8ba18c447391c557a27f8de3f7 2024-11-13T13:37:09,903 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/info/52b4a37cc2d845348d96cca8d5ce2df3 to hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/info/52b4a37cc2d845348d96cca8d5ce2df3 2024-11-13T13:37:09,904 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=bfeb2336aed7:38899 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-13T13:37:09,904 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [1811b464faa54d3cb14c6e6a337eeedf=6033, 7a05ca8ba18c447391c557a27f8de3f7=6033, 52b4a37cc2d845348d96cca8d5ce2df3=6033] 2024-11-13T13:37:09,912 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/1fe21c21f4f4b84edaae05b018016164/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-13T13:37:09,913 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164. 2024-11-13T13:37:09,913 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 1fe21c21f4f4b84edaae05b018016164: Waiting for close lock at 1731505029870Running coprocessor pre-close hooks at 1731505029870Disabling compacts and flushes for region at 1731505029870Disabling writes for close at 1731505029871 (+1 ms)Obtaining lock to block concurrent updates at 1731505029871Preparing flush snapshotting stores in 1fe21c21f4f4b84edaae05b018016164 at 1731505029871Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731505029871Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164. at 1731505029872 (+1 ms)Flushing 1fe21c21f4f4b84edaae05b018016164/info: creating writer at 1731505029872Flushing 1fe21c21f4f4b84edaae05b018016164/info: appending metadata at 1731505029877 (+5 ms)Flushing 1fe21c21f4f4b84edaae05b018016164/info: closing flushed file at 1731505029877Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@36433d22: reopening flushed file at 1731505029890 (+13 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 1fe21c21f4f4b84edaae05b018016164 in 27ms, sequenceid=22, compaction requested=true at 1731505029898 (+8 ms)Writing region close event to WAL at 1731505029909 (+11 ms)Running coprocessor post-close hooks at 1731505029913 (+4 ms)Closed at 1731505029913 2024-11-13T13:37:09,913 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731504979500.1fe21c21f4f4b84edaae05b018016164. 
2024-11-13T13:37:09,924 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/hbase/meta/1588230740/.tmp/ns/76a0bbbcca944140b8f783fda05a5a0c is 43, key is default/ns:d/1731504979401/Put/seqid=0 2024-11-13T13:37:09,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40401 is added to blk_1073741847_1023 (size=5153) 2024-11-13T13:37:09,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36763 is added to blk_1073741847_1023 (size=5153) 2024-11-13T13:37:09,929 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/hbase/meta/1588230740/.tmp/ns/76a0bbbcca944140b8f783fda05a5a0c 2024-11-13T13:37:09,949 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/hbase/meta/1588230740/.tmp/table/03d6fa9c108a44888e63abc3e6d84e4d is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731504979897/Put/seqid=0 2024-11-13T13:37:09,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36763 is added to blk_1073741848_1024 (size=5508) 2024-11-13T13:37:09,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40401 is added to blk_1073741848_1024 (size=5508) 2024-11-13T13:37:09,953 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/hbase/meta/1588230740/.tmp/table/03d6fa9c108a44888e63abc3e6d84e4d 2024-11-13T13:37:09,960 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/hbase/meta/1588230740/.tmp/info/fd41f580256547cb8f43ff89f680ca16 as hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/hbase/meta/1588230740/info/fd41f580256547cb8f43ff89f680ca16 2024-11-13T13:37:09,966 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/hbase/meta/1588230740/info/fd41f580256547cb8f43ff89f680ca16, entries=10, sequenceid=11, filesize=7.1 K 2024-11-13T13:37:09,967 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/hbase/meta/1588230740/.tmp/ns/76a0bbbcca944140b8f783fda05a5a0c as hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/hbase/meta/1588230740/ns/76a0bbbcca944140b8f783fda05a5a0c 2024-11-13T13:37:09,973 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/hbase/meta/1588230740/ns/76a0bbbcca944140b8f783fda05a5a0c, entries=2, sequenceid=11, filesize=5.0 K 2024-11-13T13:37:09,974 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/hbase/meta/1588230740/.tmp/table/03d6fa9c108a44888e63abc3e6d84e4d as hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/hbase/meta/1588230740/table/03d6fa9c108a44888e63abc3e6d84e4d 2024-11-13T13:37:09,980 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/hbase/meta/1588230740/table/03d6fa9c108a44888e63abc3e6d84e4d, entries=2, sequenceid=11, filesize=5.4 K 2024-11-13T13:37:09,981 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 108ms, sequenceid=11, compaction requested=false 2024-11-13T13:37:09,986 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-13T13:37:09,986 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T13:37:09,986 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T13:37:09,986 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731505029872Running coprocessor pre-close hooks at 1731505029872Disabling compacts and flushes for region at 1731505029872Disabling writes for close at 1731505029873 (+1 ms)Obtaining lock to block concurrent updates at 1731505029873Preparing flush snapshotting stores in 1588230740 at 1731505029873Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731505029873Flushing stores of hbase:meta,,1.1588230740 at 1731505029874 (+1 ms)Flushing 1588230740/info: creating writer at 1731505029874Flushing 1588230740/info: appending metadata at 1731505029891 (+17 ms)Flushing 1588230740/info: closing flushed file at 1731505029891Flushing 1588230740/ns: creating writer at 1731505029904 (+13 ms)Flushing 1588230740/ns: appending metadata at 1731505029923 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1731505029923Flushing 1588230740/table: creating writer at 1731505029934 (+11 ms)Flushing 1588230740/table: appending metadata at 1731505029948 (+14 ms)Flushing 1588230740/table: closing flushed file at 1731505029948Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@275bd9c0: reopening flushed file at 1731505029959 (+11 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@123ea253: reopening flushed file at 1731505029966 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@51abf1c3: reopening flushed file at 1731505029973 (+7 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 108ms, sequenceid=11, compaction requested=false at 1731505029981 (+8 ms)Writing region close event to WAL at 1731505029982 (+1 ms)Running coprocessor post-close hooks at 1731505029986 (+4 ms)Closed at 1731505029986 2024-11-13T13:37:09,987 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T13:37:10,072 INFO [RS:0;bfeb2336aed7:44923 {}] regionserver.HRegionServer(976): stopping server bfeb2336aed7,44923,1731504978242; all regions closed. 2024-11-13T13:37:10,073 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:37:10,073 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:37:10,074 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:37:10,074 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:37:10,074 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:37:10,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40401 is added to blk_1073741834_1010 (size=3306) 2024-11-13T13:37:10,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36763 is added to blk_1073741834_1010 (size=3306) 2024-11-13T13:37:10,079 DEBUG [RS:0;bfeb2336aed7:44923 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/oldWALs 2024-11-13T13:37:10,079 INFO [RS:0;bfeb2336aed7:44923 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bfeb2336aed7%2C44923%2C1731504978242.meta:.meta(num 1731504979312) 2024-11-13T13:37:10,080 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:37:10,080 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:37:10,080 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:37:10,080 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:37:10,080 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:37:10,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40401 is added to blk_1073741844_1020 (size=1252) 2024-11-13T13:37:10,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36763 is added to blk_1073741844_1020 (size=1252) 2024-11-13T13:37:10,086 DEBUG [RS:0;bfeb2336aed7:44923 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/oldWALs 2024-11-13T13:37:10,086 INFO [RS:0;bfeb2336aed7:44923 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bfeb2336aed7%2C44923%2C1731504978242:(num 1731505029816) 2024-11-13T13:37:10,086 DEBUG [RS:0;bfeb2336aed7:44923 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:37:10,086 INFO [RS:0;bfeb2336aed7:44923 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T13:37:10,086 INFO [RS:0;bfeb2336aed7:44923 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T13:37:10,086 INFO [RS:0;bfeb2336aed7:44923 {}] hbase.ChoreService(370): Chore service for: regionserver/bfeb2336aed7:0 had 
[ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-13T13:37:10,086 INFO [RS:0;bfeb2336aed7:44923 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T13:37:10,086 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T13:37:10,087 INFO [RS:0;bfeb2336aed7:44923 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44923 2024-11-13T13:37:10,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44923-0x101346934310001, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bfeb2336aed7,44923,1731504978242 2024-11-13T13:37:10,110 INFO [RS:0;bfeb2336aed7:44923 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T13:37:10,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T13:37:10,120 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bfeb2336aed7,44923,1731504978242] 2024-11-13T13:37:10,130 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/bfeb2336aed7,44923,1731504978242 already deleted, retry=false 2024-11-13T13:37:10,131 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; bfeb2336aed7,44923,1731504978242 expired; onlineServers=0 2024-11-13T13:37:10,131 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'bfeb2336aed7,38899,1731504978074' ***** 2024-11-13T13:37:10,131 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-13T13:37:10,131 INFO [M:0;bfeb2336aed7:38899 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T13:37:10,131 INFO [M:0;bfeb2336aed7:38899 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T13:37:10,131 DEBUG [M:0;bfeb2336aed7:38899 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-13T13:37:10,131 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
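Editor's note: the NodeDeleted event on /hbase/rs/bfeb2336aed7,44923,... and the RegionServerTracker "ephemeral node deleted, processing expiration" record above show the ephemeral-znode liveness pattern: each region server registers an ephemeral node under /hbase/rs, and the master watches that path so the node vanishing (session closed or expired) triggers expiration handling. Below is a minimal standalone sketch of that general pattern using the plain ZooKeeper client; the quorum address, path, and timeouts are placeholders and this is not HBase's RegionServerTracker code.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.*;

// Illustrative only: a generic ephemeral-znode liveness pattern, not HBase's RegionServerTracker.
// Quorum address, path and timeouts are placeholders, not values taken from this run.
public class EphemeralLivenessSketch {

    // Open a session and block until it is actually connected.
    static ZooKeeper connect(String quorum) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper(quorum, 30_000, e -> {
            if (e.getState() == Watcher.Event.KeeperState.SyncConnected) {
                connected.countDown();
            }
        });
        connected.await();
        return zk;
    }

    public static void main(String[] args) throws Exception {
        String quorum = "127.0.0.1:2181";
        String member = "/demo-rs-server-1";          // analogous to /hbase/rs/<server-name>

        ZooKeeper master = connect(quorum);           // the watching side
        ZooKeeper server = connect(quorum);           // the side that must stay alive

        // The server announces itself with an EPHEMERAL node; it vanishes when its session ends.
        server.create(member, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

        // The master watches the node; a NodeDeleted event arrives once the server session is gone.
        master.exists(member, event -> {
            if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
                System.out.println("ephemeral node deleted, processing expiration: " + event.getPath());
            }
        });

        server.close();       // simulates the region server shutting down
        Thread.sleep(1_000);  // give the watch time to fire before the demo exits
        master.close();
    }
}
```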
2024-11-13T13:37:10,131 DEBUG [M:0;bfeb2336aed7:38899 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T13:37:10,131 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.small.0-1731504978637 {}] cleaner.HFileCleaner(306): Exit Thread[master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.small.0-1731504978637,5,FailOnTimeoutGroup] 2024-11-13T13:37:10,131 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.large.0-1731504978636 {}] cleaner.HFileCleaner(306): Exit Thread[master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.large.0-1731504978636,5,FailOnTimeoutGroup] 2024-11-13T13:37:10,131 INFO [M:0;bfeb2336aed7:38899 {}] hbase.ChoreService(370): Chore service for: master/bfeb2336aed7:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T13:37:10,131 INFO [M:0;bfeb2336aed7:38899 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T13:37:10,131 DEBUG [M:0;bfeb2336aed7:38899 {}] master.HMaster(1795): Stopping service threads 2024-11-13T13:37:10,131 INFO [M:0;bfeb2336aed7:38899 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T13:37:10,132 INFO [M:0;bfeb2336aed7:38899 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T13:37:10,132 INFO [M:0;bfeb2336aed7:38899 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T13:37:10,132 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-13T13:37:10,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T13:37:10,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:37:10,141 DEBUG [M:0;bfeb2336aed7:38899 {}] zookeeper.ZKUtil(347): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-13T13:37:10,141 WARN [M:0;bfeb2336aed7:38899 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-13T13:37:10,142 INFO [M:0;bfeb2336aed7:38899 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/.lastflushedseqids 2024-11-13T13:37:10,143 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:10,143 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:10,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40401 is added to blk_1073741849_1025 (size=130) 2024-11-13T13:37:10,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36763 is added to blk_1073741849_1025 (size=130) 2024-11-13T13:37:10,149 INFO [M:0;bfeb2336aed7:38899 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T13:37:10,149 INFO [M:0;bfeb2336aed7:38899 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T13:37:10,150 DEBUG [M:0;bfeb2336aed7:38899 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T13:37:10,150 INFO [M:0;bfeb2336aed7:38899 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:37:10,150 DEBUG [M:0;bfeb2336aed7:38899 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:37:10,150 DEBUG [M:0;bfeb2336aed7:38899 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T13:37:10,150 DEBUG [M:0;bfeb2336aed7:38899 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T13:37:10,150 INFO [M:0;bfeb2336aed7:38899 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.55 KB heapSize=54.91 KB 2024-11-13T13:37:10,167 DEBUG [M:0;bfeb2336aed7:38899 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6d6e68dc36d74a0485423aff329603e1 is 82, key is hbase:meta,,1/info:regioninfo/1731504979343/Put/seqid=0 2024-11-13T13:37:10,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36763 is added to blk_1073741850_1026 (size=5672) 2024-11-13T13:37:10,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40401 is added to blk_1073741850_1026 (size=5672) 2024-11-13T13:37:10,172 INFO [M:0;bfeb2336aed7:38899 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6d6e68dc36d74a0485423aff329603e1 2024-11-13T13:37:10,194 DEBUG [M:0;bfeb2336aed7:38899 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b527c1af3d824b7e883373cac5951618 is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731504979903/Put/seqid=0 2024-11-13T13:37:10,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36763 is added to blk_1073741851_1027 (size=7818) 2024-11-13T13:37:10,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40401 is added to blk_1073741851_1027 (size=7818) 2024-11-13T13:37:10,199 INFO [M:0;bfeb2336aed7:38899 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.95 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b527c1af3d824b7e883373cac5951618 2024-11-13T13:37:10,203 INFO [M:0;bfeb2336aed7:38899 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b527c1af3d824b7e883373cac5951618 2024-11-13T13:37:10,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44923-0x101346934310001, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T13:37:10,220 INFO [RS:0;bfeb2336aed7:44923 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T13:37:10,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44923-0x101346934310001, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T13:37:10,220 INFO [RS:0;bfeb2336aed7:44923 {}] regionserver.HRegionServer(1031): Exiting; stopping=bfeb2336aed7,44923,1731504978242; zookeeper connection closed. 
2024-11-13T13:37:10,221 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3e6ed23a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3e6ed23a 2024-11-13T13:37:10,221 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-13T13:37:10,223 DEBUG [M:0;bfeb2336aed7:38899 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fcf979c896d041bd8cb173a23e400068 is 69, key is bfeb2336aed7,44923,1731504978242/rs:state/1731504978710/Put/seqid=0 2024-11-13T13:37:10,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36763 is added to blk_1073741852_1028 (size=5156) 2024-11-13T13:37:10,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40401 is added to blk_1073741852_1028 (size=5156) 2024-11-13T13:37:10,228 INFO [M:0;bfeb2336aed7:38899 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fcf979c896d041bd8cb173a23e400068 2024-11-13T13:37:10,248 DEBUG [M:0;bfeb2336aed7:38899 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/447408883120440ba4b0a841629242f2 is 52, key is load_balancer_on/state:d/1731504979496/Put/seqid=0 2024-11-13T13:37:10,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36763 is added to blk_1073741853_1029 (size=5056) 2024-11-13T13:37:10,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40401 is added to blk_1073741853_1029 (size=5056) 2024-11-13T13:37:10,253 INFO [M:0;bfeb2336aed7:38899 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/447408883120440ba4b0a841629242f2 2024-11-13T13:37:10,258 DEBUG [M:0;bfeb2336aed7:38899 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6d6e68dc36d74a0485423aff329603e1 as hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6d6e68dc36d74a0485423aff329603e1 2024-11-13T13:37:10,263 INFO [M:0;bfeb2336aed7:38899 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6d6e68dc36d74a0485423aff329603e1, entries=8, sequenceid=121, filesize=5.5 K 2024-11-13T13:37:10,265 DEBUG [M:0;bfeb2336aed7:38899 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b527c1af3d824b7e883373cac5951618 as hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b527c1af3d824b7e883373cac5951618 2024-11-13T13:37:10,270 INFO [M:0;bfeb2336aed7:38899 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b527c1af3d824b7e883373cac5951618 2024-11-13T13:37:10,270 INFO [M:0;bfeb2336aed7:38899 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b527c1af3d824b7e883373cac5951618, entries=14, sequenceid=121, filesize=7.6 K 2024-11-13T13:37:10,271 DEBUG [M:0;bfeb2336aed7:38899 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fcf979c896d041bd8cb173a23e400068 as hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fcf979c896d041bd8cb173a23e400068 2024-11-13T13:37:10,277 INFO [M:0;bfeb2336aed7:38899 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fcf979c896d041bd8cb173a23e400068, entries=1, sequenceid=121, filesize=5.0 K 2024-11-13T13:37:10,278 DEBUG [M:0;bfeb2336aed7:38899 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/447408883120440ba4b0a841629242f2 as hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/447408883120440ba4b0a841629242f2 2024-11-13T13:37:10,284 INFO [M:0;bfeb2336aed7:38899 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36201/user/jenkins/test-data/9254dffa-9c3f-ea55-6718-6f671995d9b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/447408883120440ba4b0a841629242f2, entries=1, sequenceid=121, filesize=4.9 K 2024-11-13T13:37:10,285 INFO [M:0;bfeb2336aed7:38899 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.55 KB/44593, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 135ms, sequenceid=121, compaction requested=false 2024-11-13T13:37:10,286 INFO [M:0;bfeb2336aed7:38899 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
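Editor's note: the "Committing .../.tmp/<file> as .../<family>/<file>" records above follow the usual write-then-rename commit idiom: the flusher writes the new HFile under a temporary directory and only a rename makes it visible inside the store. Below is a minimal sketch of that general idiom against the Hadoop FileSystem API; the paths are placeholders and this is not HBase's HRegionFileSystem code.

```java
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative only: generic "write to .tmp, then rename into place" commit, not HBase's flush code.
public class TmpCommitSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();        // picks up fs.defaultFS, e.g. an HDFS URI
        FileSystem fs = FileSystem.get(conf);

        Path storeDir  = new Path("/demo/store/info");   // placeholder store directory
        Path tmpFile   = new Path("/demo/store/.tmp/abc123");
        Path finalFile = new Path(storeDir, tmpFile.getName());

        // 1) Write the new file somewhere readers never look.
        try (OutputStream out = fs.create(tmpFile, true)) {
            out.write("flushed cells would go here".getBytes(StandardCharsets.UTF_8));
        }

        // 2) Make it visible by renaming it into the store (atomic within one HDFS namespace).
        fs.mkdirs(storeDir);
        if (!fs.rename(tmpFile, finalFile)) {
            throw new IllegalStateException("commit failed: " + tmpFile + " -> " + finalFile);
        }
        System.out.println("committed " + finalFile);
    }
}
```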
2024-11-13T13:37:10,286 DEBUG [M:0;bfeb2336aed7:38899 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731505030150Disabling compacts and flushes for region at 1731505030150Disabling writes for close at 1731505030150Obtaining lock to block concurrent updates at 1731505030150Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731505030150Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44593, getHeapSize=56168, getOffHeapSize=0, getCellsCount=140 at 1731505030150Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731505030151 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731505030151Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731505030166 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731505030166Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731505030176 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731505030193 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731505030193Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731505030204 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731505030222 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731505030222Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731505030233 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731505030247 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731505030247Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@732eb059: reopening flushed file at 1731505030257 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@360311bc: reopening flushed file at 1731505030264 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3a42b23a: reopening flushed file at 1731505030270 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5ec38100: reopening flushed file at 1731505030277 (+7 ms)Finished flush of dataSize ~43.55 KB/44593, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 135ms, sequenceid=121, compaction requested=false at 1731505030285 (+8 ms)Writing region close event to WAL at 1731505030286 (+1 ms)Closed at 1731505030286 2024-11-13T13:37:10,286 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:37:10,286 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:37:10,287 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:37:10,287 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:37:10,287 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:37:10,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36763 is added to blk_1073741830_1006 (size=52990) 2024-11-13T13:37:10,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40401 is added to blk_1073741830_1006 (size=52990) 2024-11-13T13:37:10,289 INFO [M:0;bfeb2336aed7:38899 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
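Editor's note: the close journal above records "Obtaining lock to block concurrent updates", "Acquired close lock ... after waiting 0 ms", and "Updates disabled for region". A common way to get that behavior is a read/write lock where every mutation holds the read side and close takes the write side, so close waits for in-flight updates and then keeps new ones out. The sketch below shows that generic idiom only; it is not a reproduction of HRegion's actual locking.

```java
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Illustrative only: a generic close-lock idiom (updates share the read lock, close takes the
// write lock), not HRegion's internal implementation.
public class CloseLockSketch {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private volatile boolean closed = false;

    // Every update runs under the read lock, so many updates can proceed concurrently.
    public void update(Runnable mutation) {
        lock.readLock().lock();
        try {
            if (closed) {
                throw new IllegalStateException("region already closed");
            }
            mutation.run();
        } finally {
            lock.readLock().unlock();
        }
    }

    // Close takes the write lock: it waits for in-flight updates, then blocks new ones for good.
    public void close() {
        long start = System.currentTimeMillis();
        lock.writeLock().lock();    // "Obtaining lock to block concurrent updates"
        try {
            closed = true;          // "Updates disabled for region"
            System.out.println("Acquired close lock after waiting "
                + (System.currentTimeMillis() - start) + " ms");
            // flushing stores and writing the region close event would happen here
        } finally {
            lock.writeLock().unlock();
        }
    }
}
```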
2024-11-13T13:37:10,289 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T13:37:10,289 INFO [M:0;bfeb2336aed7:38899 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38899 2024-11-13T13:37:10,289 INFO [M:0;bfeb2336aed7:38899 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T13:37:10,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T13:37:10,431 INFO [M:0;bfeb2336aed7:38899 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T13:37:10,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38899-0x101346934310000, quorum=127.0.0.1:51252, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T13:37:10,469 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@67b4013d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:37:10,470 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1c429e05{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T13:37:10,470 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T13:37:10,470 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74e6f5d9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T13:37:10,470 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30873421{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/hadoop.log.dir/,STOPPED} 2024-11-13T13:37:10,472 WARN [BP-755367798-172.17.0.2-1731504975433 heartbeating to localhost/127.0.0.1:36201 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T13:37:10,472 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T13:37:10,472 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T13:37:10,472 WARN [BP-755367798-172.17.0.2-1731504975433 heartbeating to localhost/127.0.0.1:36201 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-755367798-172.17.0.2-1731504975433 (Datanode Uuid 2f5a8021-f972-4bd3-8a64-3cd5e38e7238) service to localhost/127.0.0.1:36201 2024-11-13T13:37:10,473 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/cluster_3d9d16a9-7805-2f9f-6cf6-09135c2106e7/data/data3/current/BP-755367798-172.17.0.2-1731504975433 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:37:10,473 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/cluster_3d9d16a9-7805-2f9f-6cf6-09135c2106e7/data/data4/current/BP-755367798-172.17.0.2-1731504975433 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:37:10,473 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T13:37:10,476 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@375c379c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:37:10,476 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3d40a54d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T13:37:10,476 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T13:37:10,476 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5b135886{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T13:37:10,476 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4e873b68{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/hadoop.log.dir/,STOPPED} 2024-11-13T13:37:10,478 WARN [BP-755367798-172.17.0.2-1731504975433 heartbeating to localhost/127.0.0.1:36201 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T13:37:10,478 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T13:37:10,478 WARN [BP-755367798-172.17.0.2-1731504975433 heartbeating to localhost/127.0.0.1:36201 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-755367798-172.17.0.2-1731504975433 (Datanode Uuid 72c88717-ed1e-4154-8577-25817a6a2bb1) service to localhost/127.0.0.1:36201 2024-11-13T13:37:10,478 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T13:37:10,479 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/cluster_3d9d16a9-7805-2f9f-6cf6-09135c2106e7/data/data1/current/BP-755367798-172.17.0.2-1731504975433 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:37:10,479 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/cluster_3d9d16a9-7805-2f9f-6cf6-09135c2106e7/data/data2/current/BP-755367798-172.17.0.2-1731504975433 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:37:10,479 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T13:37:10,486 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@677a249b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T13:37:10,486 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@67c2b9b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T13:37:10,487 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T13:37:10,487 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1106c0e7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T13:37:10,487 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1d6dee42{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/hadoop.log.dir/,STOPPED} 2024-11-13T13:37:10,494 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-13T13:37:10,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-13T13:37:10,523 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=210 (was 184) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36201 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36201 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36201 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36201 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36201 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36201 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:36201 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36201 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=285 (was 239) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=3252 (was 3444) 2024-11-13T13:37:10,532 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=210, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=285, ProcessCount=11, AvailableMemoryMB=3252 2024-11-13T13:37:10,532 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-13T13:37:10,532 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/hadoop.log.dir so I do NOT create it in target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e 2024-11-13T13:37:10,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/10b34d63-e17b-0bfe-df33-b4ed44c431d1/hadoop.tmp.dir so I do NOT create it in target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e 2024-11-13T13:37:10,533 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/cluster_4c473c0e-5978-a336-d575-91d8f4138688, deleteOnExit=true 2024-11-13T13:37:10,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-13T13:37:10,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/test.cache.data in system properties and HBase conf 2024-11-13T13:37:10,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T13:37:10,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/hadoop.log.dir in system properties and HBase conf 2024-11-13T13:37:10,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T13:37:10,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T13:37:10,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T13:37:10,533 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-13T13:37:10,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T13:37:10,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T13:37:10,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T13:37:10,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T13:37:10,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T13:37:10,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T13:37:10,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T13:37:10,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T13:37:10,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-13T13:37:10,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/nfs.dump.dir in system properties and HBase conf 2024-11-13T13:37:10,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/java.io.tmpdir in system properties and HBase conf 2024-11-13T13:37:10,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T13:37:10,535 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T13:37:10,535 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T13:37:10,547 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T13:37:10,735 INFO [regionserver/bfeb2336aed7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T13:37:10,955 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:37:10,959 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T13:37:10,960 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T13:37:10,960 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T13:37:10,960 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T13:37:10,960 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:37:10,961 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4881a2ed{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/hadoop.log.dir/,AVAILABLE} 2024-11-13T13:37:10,961 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@26881465{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T13:37:11,069 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@652d6e37{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/java.io.tmpdir/jetty-localhost-43963-hadoop-hdfs-3_4_1-tests_jar-_-any-3208432440293616648/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T13:37:11,069 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@444d0b71{HTTP/1.1, (http/1.1)}{localhost:43963} 2024-11-13T13:37:11,070 INFO [Time-limited test {}] server.Server(415): Started @245757ms 2024-11-13T13:37:11,100 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T13:37:11,143 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:11,143 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:11,358 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:37:11,361 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T13:37:11,361 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T13:37:11,361 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T13:37:11,361 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T13:37:11,362 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5551c062{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/hadoop.log.dir/,AVAILABLE} 2024-11-13T13:37:11,362 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c0c72e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T13:37:11,465 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3e4bbe36{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/java.io.tmpdir/jetty-localhost-34767-hadoop-hdfs-3_4_1-tests_jar-_-any-129728609841669841/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:37:11,466 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@601b78f7{HTTP/1.1, (http/1.1)}{localhost:34767} 2024-11-13T13:37:11,466 INFO [Time-limited test {}] server.Server(415): Started @246153ms 2024-11-13T13:37:11,467 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T13:37:11,497 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:37:11,501 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T13:37:11,502 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T13:37:11,502 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T13:37:11,502 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T13:37:11,502 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a5db76d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/hadoop.log.dir/,AVAILABLE} 2024-11-13T13:37:11,503 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@286b8c80{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T13:37:11,615 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@72925ee1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/java.io.tmpdir/jetty-localhost-42149-hadoop-hdfs-3_4_1-tests_jar-_-any-17868977176140256249/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:37:11,615 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6f4aa33e{HTTP/1.1, (http/1.1)}{localhost:42149} 2024-11-13T13:37:11,615 INFO [Time-limited test {}] server.Server(415): Started @246303ms 2024-11-13T13:37:11,616 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T13:37:11,948 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T13:37:11,948 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T13:37:11,948 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-13T13:37:11,949 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-13T13:37:12,144 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:12,144 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:12,483 WARN [Thread-1969 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/cluster_4c473c0e-5978-a336-d575-91d8f4138688/data/data1/current/BP-521197480-172.17.0.2-1731505030550/current, will proceed with Du for space computation calculation, 2024-11-13T13:37:12,483 WARN [Thread-1970 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/cluster_4c473c0e-5978-a336-d575-91d8f4138688/data/data2/current/BP-521197480-172.17.0.2-1731505030550/current, will proceed with Du for space computation calculation, 2024-11-13T13:37:12,502 WARN [Thread-1933 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T13:37:12,504 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3814e7139f50f2cc with lease ID 0xc5c397ee189a520e: Processing first storage report for DS-21eba927-3bc1-415f-ab0e-3a244ff4e19b from datanode DatanodeRegistration(127.0.0.1:40741, datanodeUuid=21c33374-c183-492e-ad31-1675c2231117, infoPort=41091, infoSecurePort=0, ipcPort=36001, storageInfo=lv=-57;cid=testClusterID;nsid=466722925;c=1731505030550) 2024-11-13T13:37:12,504 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3814e7139f50f2cc with lease ID 0xc5c397ee189a520e: from storage DS-21eba927-3bc1-415f-ab0e-3a244ff4e19b node DatanodeRegistration(127.0.0.1:40741, datanodeUuid=21c33374-c183-492e-ad31-1675c2231117, infoPort=41091, infoSecurePort=0, ipcPort=36001, storageInfo=lv=-57;cid=testClusterID;nsid=466722925;c=1731505030550), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:37:12,504 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3814e7139f50f2cc with lease ID 0xc5c397ee189a520e: Processing first storage report for DS-81cc72d8-6c9d-4eca-a383-d6874c24f35a from datanode DatanodeRegistration(127.0.0.1:40741, datanodeUuid=21c33374-c183-492e-ad31-1675c2231117, infoPort=41091, infoSecurePort=0, ipcPort=36001, storageInfo=lv=-57;cid=testClusterID;nsid=466722925;c=1731505030550) 2024-11-13T13:37:12,504 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3814e7139f50f2cc with lease ID 0xc5c397ee189a520e: from storage DS-81cc72d8-6c9d-4eca-a383-d6874c24f35a node DatanodeRegistration(127.0.0.1:40741, datanodeUuid=21c33374-c183-492e-ad31-1675c2231117, infoPort=41091, infoSecurePort=0, ipcPort=36001, storageInfo=lv=-57;cid=testClusterID;nsid=466722925;c=1731505030550), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 
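[Editorial sketch] The repeated "Failed invocation for hdfs://..." warnings above come from RecoverLeaseFSUtils calling DistributedFileSystem#isFileClosed through reflection; when the underlying DFSClient has already been shut down, the real "Filesystem closed" IOException surfaces wrapped in an InvocationTargetException, which is why the trace reads "InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed". A minimal, purely illustrative sketch of that pattern (the helper class is hypothetical; only java.lang.reflect and the Hadoop FileSystem types are real):

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class IsFileClosedProbe {
      /** Returns true if fs exposes isFileClosed(Path) and reports the file closed; false otherwise. */
      static boolean isFileClosed(FileSystem fs, Path p) {
        try {
          // DistributedFileSystem#isFileClosed is looked up reflectively so the caller
          // also works against FileSystem implementations that lack the method.
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, p);
        } catch (InvocationTargetException e) {
          // The real failure (e.g. IOException: Filesystem closed) is the cause of this
          // exception, matching the "Caused by:" chain seen in the log above.
          return false;
        } catch (ReflectiveOperationException e) {
          return false;
        }
      }
    }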
2024-11-13T13:37:12,675 WARN [Thread-1980 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/cluster_4c473c0e-5978-a336-d575-91d8f4138688/data/data3/current/BP-521197480-172.17.0.2-1731505030550/current, will proceed with Du for space computation calculation, 2024-11-13T13:37:12,675 WARN [Thread-1981 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/cluster_4c473c0e-5978-a336-d575-91d8f4138688/data/data4/current/BP-521197480-172.17.0.2-1731505030550/current, will proceed with Du for space computation calculation, 2024-11-13T13:37:12,694 WARN [Thread-1956 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T13:37:12,700 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd4aceaf181ad9054 with lease ID 0xc5c397ee189a520f: Processing first storage report for DS-7aa4cc79-1756-4f0e-b8b3-9e835b0c1106 from datanode DatanodeRegistration(127.0.0.1:33351, datanodeUuid=d1b6e1b6-6758-48f3-bd11-c93d7e409ab8, infoPort=41057, infoSecurePort=0, ipcPort=46671, storageInfo=lv=-57;cid=testClusterID;nsid=466722925;c=1731505030550) 2024-11-13T13:37:12,700 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd4aceaf181ad9054 with lease ID 0xc5c397ee189a520f: from storage DS-7aa4cc79-1756-4f0e-b8b3-9e835b0c1106 node DatanodeRegistration(127.0.0.1:33351, datanodeUuid=d1b6e1b6-6758-48f3-bd11-c93d7e409ab8, infoPort=41057, infoSecurePort=0, ipcPort=46671, storageInfo=lv=-57;cid=testClusterID;nsid=466722925;c=1731505030550), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:37:12,700 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd4aceaf181ad9054 with lease ID 0xc5c397ee189a520f: Processing first storage report for DS-97cf754d-4fc2-4a47-812e-eceefb23087b from datanode DatanodeRegistration(127.0.0.1:33351, datanodeUuid=d1b6e1b6-6758-48f3-bd11-c93d7e409ab8, infoPort=41057, infoSecurePort=0, ipcPort=46671, storageInfo=lv=-57;cid=testClusterID;nsid=466722925;c=1731505030550) 2024-11-13T13:37:12,700 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd4aceaf181ad9054 with lease ID 0xc5c397ee189a520f: from storage DS-97cf754d-4fc2-4a47-812e-eceefb23087b node DatanodeRegistration(127.0.0.1:33351, datanodeUuid=d1b6e1b6-6758-48f3-bd11-c93d7e409ab8, infoPort=41057, infoSecurePort=0, ipcPort=46671, storageInfo=lv=-57;cid=testClusterID;nsid=466722925;c=1731505030550), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:37:12,747 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e 2024-11-13T13:37:12,751 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/cluster_4c473c0e-5978-a336-d575-91d8f4138688/zookeeper_0, clientPort=54356, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/cluster_4c473c0e-5978-a336-d575-91d8f4138688/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/cluster_4c473c0e-5978-a336-d575-91d8f4138688/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T13:37:12,752 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54356 2024-11-13T13:37:12,752 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:37:12,753 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:37:12,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741825_1001 (size=7) 2024-11-13T13:37:12,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741825_1001 (size=7) 2024-11-13T13:37:12,762 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d with version=8 2024-11-13T13:37:12,762 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/hbase-staging 2024-11-13T13:37:12,764 INFO [Time-limited test {}] client.ConnectionUtils(128): master/bfeb2336aed7:0 server-side Connection retries=45 2024-11-13T13:37:12,764 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T13:37:12,764 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T13:37:12,764 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T13:37:12,765 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T13:37:12,765 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T13:37:12,765 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T13:37:12,765 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T13:37:12,765 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45355 2024-11-13T13:37:12,767 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45355 connecting to ZooKeeper ensemble=127.0.0.1:54356 2024-11-13T13:37:12,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:453550x0, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T13:37:12,884 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45355-0x101346a09d80000 connected 2024-11-13T13:37:12,963 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:37:12,965 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:37:12,968 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T13:37:12,969 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d, hbase.cluster.distributed=false 2024-11-13T13:37:12,971 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T13:37:12,972 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45355 2024-11-13T13:37:12,972 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45355 2024-11-13T13:37:12,973 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45355 2024-11-13T13:37:12,973 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45355 2024-11-13T13:37:12,973 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45355 2024-11-13T13:37:12,992 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/bfeb2336aed7:0 server-side Connection retries=45 2024-11-13T13:37:12,992 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T13:37:12,992 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T13:37:12,992 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T13:37:12,992 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T13:37:12,992 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T13:37:12,992 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T13:37:12,993 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T13:37:12,993 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38995 2024-11-13T13:37:12,995 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38995 connecting to ZooKeeper ensemble=127.0.0.1:54356 2024-11-13T13:37:12,995 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:37:12,997 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:37:13,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:389950x0, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T13:37:13,005 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38995-0x101346a09d80001 connected 2024-11-13T13:37:13,005 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38995-0x101346a09d80001, quorum=127.0.0.1:54356, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T13:37:13,005 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T13:37:13,006 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T13:37:13,006 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38995-0x101346a09d80001, quorum=127.0.0.1:54356, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T13:37:13,007 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38995-0x101346a09d80001, quorum=127.0.0.1:54356, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T13:37:13,007 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38995 2024-11-13T13:37:13,008 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38995 2024-11-13T13:37:13,008 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38995 2024-11-13T13:37:13,008 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38995 2024-11-13T13:37:13,008 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38995 
2024-11-13T13:37:13,022 DEBUG [M:0;bfeb2336aed7:45355 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;bfeb2336aed7:45355 2024-11-13T13:37:13,022 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/bfeb2336aed7,45355,1731505032764 2024-11-13T13:37:13,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38995-0x101346a09d80001, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T13:37:13,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T13:37:13,034 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/bfeb2336aed7,45355,1731505032764 2024-11-13T13:37:13,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:37:13,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38995-0x101346a09d80001, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T13:37:13,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38995-0x101346a09d80001, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:37:13,044 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T13:37:13,045 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/bfeb2336aed7,45355,1731505032764 from backup master directory 2024-11-13T13:37:13,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/bfeb2336aed7,45355,1731505032764 2024-11-13T13:37:13,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38995-0x101346a09d80001, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T13:37:13,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T13:37:13,057 WARN [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
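[Editorial sketch] The becomeActiveMaster entries above show the election sequence: register an ephemeral backup-master znode, claim /hbase/master, then delete the backup entry once promoted. A rough analogy using the plain ZooKeeper API instead of HBase's ActiveMasterManager; paths mirror the log, while retries and the precondition that /hbase and /hbase/backup-masters already exist are assumed away:

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    final class MasterElectionSketch {
      static boolean tryBecomeActive(ZooKeeper zk, String serverName)
          throws KeeperException, InterruptedException {
        byte[] data = serverName.getBytes(java.nio.charset.StandardCharsets.UTF_8);
        String backup = "/hbase/backup-masters/" + serverName;
        // Ephemeral: the registration disappears if this master's session dies.
        zk.create(backup, data, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        try {
          // Only one contender can create /hbase/master; the winner is the active master.
          zk.create("/hbase/master", data, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        } catch (KeeperException.NodeExistsException e) {
          return false; // someone else is active; remain registered as a backup master
        }
        // Promoted: remove the backup-master registration, as logged above.
        zk.delete(backup, -1);
        return true;
      }
    }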
2024-11-13T13:37:13,057 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=bfeb2336aed7,45355,1731505032764 2024-11-13T13:37:13,062 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/hbase.id] with ID: 4e00270f-4572-4526-8602-bb6d1180698f 2024-11-13T13:37:13,062 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/.tmp/hbase.id 2024-11-13T13:37:13,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741826_1002 (size=42) 2024-11-13T13:37:13,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741826_1002 (size=42) 2024-11-13T13:37:13,069 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/.tmp/hbase.id]:[hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/hbase.id] 2024-11-13T13:37:13,079 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:37:13,079 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-13T13:37:13,081 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
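[Editorial sketch] The two FSUtils entries above describe writing the cluster ID to a temporary .tmp/hbase.id file and then moving it to its final location, so readers never observe a half-written hbase.id. A sketch of that write-temp-then-rename pattern; the helper name and payload are illustrative (the real file holds a serialized ClusterId, not a bare UUID string), only the Hadoop FileSystem API is real:

    import java.nio.charset.StandardCharsets;
    import java.util.UUID;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class ClusterIdFileSketch {
      static String writeClusterId(FileSystem fs, Path rootDir) throws java.io.IOException {
        String clusterId = UUID.randomUUID().toString();
        Path tmp = new Path(rootDir, ".tmp/hbase.id");
        Path target = new Path(rootDir, "hbase.id");
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        // Readers see either no hbase.id or the complete file, never a partial write.
        if (!fs.rename(tmp, target)) {
          throw new java.io.IOException("Failed to move " + tmp + " to " + target);
        }
        return clusterId;
      }
    }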
2024-11-13T13:37:13,089 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38995-0x101346a09d80001, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:37:13,089 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:37:13,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741827_1003 (size=196) 2024-11-13T13:37:13,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741827_1003 (size=196) 2024-11-13T13:37:13,096 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T13:37:13,097 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T13:37:13,097 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T13:37:13,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741828_1004 (size=1189) 2024-11-13T13:37:13,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741828_1004 (size=1189) 2024-11-13T13:37:13,105 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/MasterData/data/master/store 2024-11-13T13:37:13,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741829_1005 (size=34) 2024-11-13T13:37:13,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741829_1005 (size=34) 2024-11-13T13:37:13,112 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:37:13,112 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T13:37:13,112 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:37:13,112 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:37:13,112 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T13:37:13,112 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:37:13,113 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
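[Editorial sketch] The MasterRegion entries above print the full 'master:store' table descriptor with its info/proc/rs/state families. A hedged sketch of how an equivalent descriptor could be assembled with the public HBase client builder API; only the 'info' and 'proc' families are shown, the attribute values mirror the log, and this is not the code HBase itself runs:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    final class MasterStoreDescriptorSketch {
      static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                   // VERSIONS => '3'
                .setInMemory(true)                                   // IN_MEMORY => 'true'
                .setBloomFilterType(BloomType.ROWCOL)                // BLOOMFILTER => 'ROWCOL'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8 * 1024)                              // BLOCKSIZE => '8192 B (8KB)'
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)
                .setBloomFilterType(BloomType.ROW)
                .setBlocksize(64 * 1024)                             // BLOCKSIZE => '65536 B (64KB)'
                .build())
            .build();
      }
    }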
2024-11-13T13:37:13,113 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731505033112Disabling compacts and flushes for region at 1731505033112Disabling writes for close at 1731505033112Writing region close event to WAL at 1731505033112Closed at 1731505033112 2024-11-13T13:37:13,114 WARN [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/MasterData/data/master/store/.initializing 2024-11-13T13:37:13,114 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/MasterData/WALs/bfeb2336aed7,45355,1731505032764 2024-11-13T13:37:13,116 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bfeb2336aed7%2C45355%2C1731505032764, suffix=, logDir=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/MasterData/WALs/bfeb2336aed7,45355,1731505032764, archiveDir=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/MasterData/oldWALs, maxLogs=10 2024-11-13T13:37:13,117 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C45355%2C1731505032764.1731505033117 2024-11-13T13:37:13,121 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/MasterData/WALs/bfeb2336aed7,45355,1731505032764/bfeb2336aed7%2C45355%2C1731505032764.1731505033117 2024-11-13T13:37:13,122 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41057:41057),(127.0.0.1/127.0.0.1:41091:41091)] 2024-11-13T13:37:13,123 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T13:37:13,123 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:37:13,123 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:37:13,123 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:37:13,124 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:37:13,126 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T13:37:13,126 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:37:13,126 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:37:13,126 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:37:13,127 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T13:37:13,128 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:37:13,128 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T13:37:13,128 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:37:13,129 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T13:37:13,129 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:37:13,130 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T13:37:13,130 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:37:13,131 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T13:37:13,131 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:37:13,132 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T13:37:13,132 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:37:13,133 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:37:13,133 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:37:13,135 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:37:13,135 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:37:13,136 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T13:37:13,137 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:37:13,140 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T13:37:13,140 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=846099, jitterRate=0.07587170600891113}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T13:37:13,141 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731505033123Initializing all the Stores at 1731505033124 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731505033124Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731505033124Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731505033124Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731505033124Cleaning up temporary data from old regions at 1731505033135 (+11 ms)Region opened successfully at 1731505033141 (+6 ms) 2024-11-13T13:37:13,142 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T13:37:13,145 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76d190eb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bfeb2336aed7/172.17.0.2:0 2024-11-13T13:37:13,145 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:13,145 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:13,146 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-13T13:37:13,146 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T13:37:13,146 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T13:37:13,146 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T13:37:13,147 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-13T13:37:13,147 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-13T13:37:13,147 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T13:37:13,149 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
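The two WARN stack traces above show RecoverLeaseFSUtils probing DistributedFileSystem.isFileClosed via reflection and the probe failing with an InvocationTargetException whose cause is "java.io.IOException: Filesystem closed", i.e. the DFS client had already been shut down by the time the Close-WAL-Writer thread ran. The snippet below is a minimal, hypothetical sketch of that reflective probe pattern only; the class and method names here are illustrative and not the actual hbase-asyncfs implementation.

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical sketch of the reflective isFileClosed probe suggested by the
// stack trace: DistributedFileSystem.isFileClosed(Path) is looked up and
// invoked via reflection; an InvocationTargetException wrapping
// "IOException: Filesystem closed" means the DFS client was already closed.
public final class IsFileClosedProbe {
  private IsFileClosedProbe() {}

  public static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      return false; // this FileSystem implementation has no such method
    } catch (IllegalAccessException | InvocationTargetException e) {
      // Matches the WARN above: the real cause (e.g. "Filesystem closed")
      // travels in InvocationTargetException.getCause().
      return false;
    }
  }
}
```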
2024-11-13T13:37:13,150 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T13:37:13,183 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T13:37:13,184 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T13:37:13,185 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T13:37:13,194 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T13:37:13,194 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T13:37:13,195 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T13:37:13,204 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T13:37:13,205 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T13:37:13,215 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T13:37:13,217 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T13:37:13,225 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T13:37:13,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T13:37:13,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38995-0x101346a09d80001, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T13:37:13,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38995-0x101346a09d80001, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:37:13,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-13T13:37:13,237 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=bfeb2336aed7,45355,1731505032764, sessionid=0x101346a09d80000, setting cluster-up flag (Was=false) 2024-11-13T13:37:13,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:37:13,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38995-0x101346a09d80001, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:37:13,289 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T13:37:13,290 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bfeb2336aed7,45355,1731505032764 2024-11-13T13:37:13,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38995-0x101346a09d80001, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:37:13,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:37:13,341 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T13:37:13,343 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bfeb2336aed7,45355,1731505032764 2024-11-13T13:37:13,345 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T13:37:13,347 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T13:37:13,347 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T13:37:13,347 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-13T13:37:13,348 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: bfeb2336aed7,45355,1731505032764 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T13:37:13,350 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/bfeb2336aed7:0, corePoolSize=5, maxPoolSize=5 2024-11-13T13:37:13,350 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/bfeb2336aed7:0, corePoolSize=5, maxPoolSize=5 2024-11-13T13:37:13,350 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=5, maxPoolSize=5 2024-11-13T13:37:13,350 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=5, maxPoolSize=5 2024-11-13T13:37:13,350 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/bfeb2336aed7:0, corePoolSize=10, maxPoolSize=10 2024-11-13T13:37:13,351 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:37:13,351 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=2, maxPoolSize=2 2024-11-13T13:37:13,351 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:37:13,353 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T13:37:13,353 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T13:37:13,354 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:37:13,354 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T13:37:13,356 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731505063356 2024-11-13T13:37:13,356 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T13:37:13,357 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T13:37:13,357 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T13:37:13,357 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T13:37:13,357 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T13:37:13,357 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T13:37:13,357 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
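The hbase:meta table descriptor dumped above lists per-family attributes such as VERSIONS, IN_MEMORY, BLOOMFILTER, DATA_BLOCK_ENCODING and BLOCKSIZE. As a reference point, roughly the same 'info'-family settings can be expressed through the public HBase client API; this is only a sketch, the table name "demo" is hypothetical, and it is not the FSTableDescriptors code that actually wrote the descriptor.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class InfoFamilyExample {
  public static TableDescriptor build() {
    // Mirrors the 'info' family attributes printed in the log:
    // VERSIONS=3, IN_MEMORY=true, BLOOMFILTER=ROWCOL,
    // DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOCKSIZE=8192.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8192)
        .build();
    // "demo" is a hypothetical table name used only for illustration.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(info)
        .build();
  }
}
```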
2024-11-13T13:37:13,357 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T13:37:13,358 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T13:37:13,358 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T13:37:13,358 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T13:37:13,358 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T13:37:13,358 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.large.0-1731505033358,5,FailOnTimeoutGroup] 2024-11-13T13:37:13,358 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.small.0-1731505033358,5,FailOnTimeoutGroup] 2024-11-13T13:37:13,358 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T13:37:13,358 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-13T13:37:13,358 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T13:37:13,358 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-13T13:37:13,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741831_1007 (size=1321) 2024-11-13T13:37:13,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741831_1007 (size=1321) 2024-11-13T13:37:13,361 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T13:37:13,361 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d 2024-11-13T13:37:13,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741832_1008 (size=32) 2024-11-13T13:37:13,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741832_1008 (size=32) 2024-11-13T13:37:13,369 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:37:13,370 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T13:37:13,371 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T13:37:13,372 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:37:13,372 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:37:13,372 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T13:37:13,373 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T13:37:13,373 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:37:13,374 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:37:13,374 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T13:37:13,375 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T13:37:13,375 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:37:13,375 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:37:13,375 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T13:37:13,376 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T13:37:13,376 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:37:13,377 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:37:13,377 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T13:37:13,378 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/hbase/meta/1588230740 2024-11-13T13:37:13,378 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/hbase/meta/1588230740 2024-11-13T13:37:13,379 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T13:37:13,379 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T13:37:13,380 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
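The FlushLargeStoresPolicy messages above (32.0 M for master:store earlier, 16.0 M for hbase:meta just below) document the fallback used when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set: the per-family flush lower bound is the region's memstore flush heap size divided by the number of column families. For the master local region that is 134217728 / 4 families = 33554432 bytes, matching the flushSizeLowerBound=33554432 logged for it. The tiny sketch below only restates that arithmetic; the method name is illustrative.

```java
// Illustrative arithmetic only; the real logic lives in FlushLargeStoresPolicy.
public final class FlushLowerBound {
  static long perFamilyLowerBound(long regionMemstoreFlushSize, int numFamilies) {
    return regionMemstoreFlushSize / numFamilies;
  }

  public static void main(String[] args) {
    // master:store region: flushSize=134217728 (128 MB), 4 families
    // (info, proc, rs, state) -> 33554432 bytes (32 MB), as logged.
    System.out.println(perFamilyLowerBound(134_217_728L, 4)); // 33554432
  }
}
```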
2024-11-13T13:37:13,381 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T13:37:13,383 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T13:37:13,384 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=764280, jitterRate=-0.028168976306915283}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T13:37:13,384 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731505033369Initializing all the Stores at 1731505033370 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731505033370Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731505033370Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731505033370Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731505033370Cleaning up temporary data from old regions at 1731505033379 (+9 ms)Region opened successfully at 1731505033384 (+5 ms) 2024-11-13T13:37:13,385 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T13:37:13,385 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T13:37:13,385 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T13:37:13,385 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T13:37:13,385 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T13:37:13,385 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T13:37:13,385 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731505033385Disabling compacts and flushes for region at 1731505033385Disabling writes for close at 1731505033385Writing region 
close event to WAL at 1731505033385Closed at 1731505033385 2024-11-13T13:37:13,387 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T13:37:13,387 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T13:37:13,387 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T13:37:13,389 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T13:37:13,390 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T13:37:13,410 INFO [RS:0;bfeb2336aed7:38995 {}] regionserver.HRegionServer(746): ClusterId : 4e00270f-4572-4526-8602-bb6d1180698f 2024-11-13T13:37:13,410 DEBUG [RS:0;bfeb2336aed7:38995 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T13:37:13,424 DEBUG [RS:0;bfeb2336aed7:38995 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T13:37:13,424 DEBUG [RS:0;bfeb2336aed7:38995 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T13:37:13,437 DEBUG [RS:0;bfeb2336aed7:38995 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T13:37:13,437 DEBUG [RS:0;bfeb2336aed7:38995 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3274a8f1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bfeb2336aed7/172.17.0.2:0 2024-11-13T13:37:13,451 DEBUG [RS:0;bfeb2336aed7:38995 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;bfeb2336aed7:38995 2024-11-13T13:37:13,451 INFO [RS:0;bfeb2336aed7:38995 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T13:37:13,451 INFO [RS:0;bfeb2336aed7:38995 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T13:37:13,451 DEBUG [RS:0;bfeb2336aed7:38995 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-13T13:37:13,451 INFO [RS:0;bfeb2336aed7:38995 {}] regionserver.HRegionServer(2659): reportForDuty to master=bfeb2336aed7,45355,1731505032764 with port=38995, startcode=1731505032992 2024-11-13T13:37:13,452 DEBUG [RS:0;bfeb2336aed7:38995 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T13:37:13,454 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33607, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T13:37:13,454 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45355 {}] master.ServerManager(363): Checking decommissioned status of RegionServer bfeb2336aed7,38995,1731505032992 2024-11-13T13:37:13,454 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45355 {}] master.ServerManager(517): Registering regionserver=bfeb2336aed7,38995,1731505032992 2024-11-13T13:37:13,456 DEBUG [RS:0;bfeb2336aed7:38995 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d 2024-11-13T13:37:13,456 DEBUG [RS:0;bfeb2336aed7:38995 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41599 2024-11-13T13:37:13,456 DEBUG [RS:0;bfeb2336aed7:38995 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T13:37:13,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T13:37:13,468 DEBUG [RS:0;bfeb2336aed7:38995 {}] zookeeper.ZKUtil(111): regionserver:38995-0x101346a09d80001, quorum=127.0.0.1:54356, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bfeb2336aed7,38995,1731505032992 2024-11-13T13:37:13,468 WARN [RS:0;bfeb2336aed7:38995 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T13:37:13,468 INFO [RS:0;bfeb2336aed7:38995 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T13:37:13,468 DEBUG [RS:0;bfeb2336aed7:38995 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/WALs/bfeb2336aed7,38995,1731505032992 2024-11-13T13:37:13,468 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bfeb2336aed7,38995,1731505032992] 2024-11-13T13:37:13,472 INFO [RS:0;bfeb2336aed7:38995 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T13:37:13,473 INFO [RS:0;bfeb2336aed7:38995 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T13:37:13,474 INFO [RS:0;bfeb2336aed7:38995 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T13:37:13,474 INFO [RS:0;bfeb2336aed7:38995 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-13T13:37:13,474 INFO [RS:0;bfeb2336aed7:38995 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T13:37:13,475 INFO [RS:0;bfeb2336aed7:38995 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T13:37:13,475 INFO [RS:0;bfeb2336aed7:38995 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T13:37:13,475 DEBUG [RS:0;bfeb2336aed7:38995 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:37:13,475 DEBUG [RS:0;bfeb2336aed7:38995 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:37:13,475 DEBUG [RS:0;bfeb2336aed7:38995 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:37:13,476 DEBUG [RS:0;bfeb2336aed7:38995 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:37:13,476 DEBUG [RS:0;bfeb2336aed7:38995 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:37:13,476 DEBUG [RS:0;bfeb2336aed7:38995 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bfeb2336aed7:0, corePoolSize=2, maxPoolSize=2 2024-11-13T13:37:13,476 DEBUG [RS:0;bfeb2336aed7:38995 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:37:13,476 DEBUG [RS:0;bfeb2336aed7:38995 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:37:13,476 DEBUG [RS:0;bfeb2336aed7:38995 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:37:13,476 DEBUG [RS:0;bfeb2336aed7:38995 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:37:13,476 DEBUG [RS:0;bfeb2336aed7:38995 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:37:13,476 DEBUG [RS:0;bfeb2336aed7:38995 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:37:13,476 DEBUG [RS:0;bfeb2336aed7:38995 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bfeb2336aed7:0, corePoolSize=3, maxPoolSize=3 2024-11-13T13:37:13,476 DEBUG [RS:0;bfeb2336aed7:38995 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0, corePoolSize=3, maxPoolSize=3 2024-11-13T13:37:13,477 INFO [RS:0;bfeb2336aed7:38995 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-13T13:37:13,477 INFO [RS:0;bfeb2336aed7:38995 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T13:37:13,477 INFO [RS:0;bfeb2336aed7:38995 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T13:37:13,477 INFO [RS:0;bfeb2336aed7:38995 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T13:37:13,477 INFO [RS:0;bfeb2336aed7:38995 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T13:37:13,477 INFO [RS:0;bfeb2336aed7:38995 {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,38995,1731505032992-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T13:37:13,491 INFO [RS:0;bfeb2336aed7:38995 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T13:37:13,491 INFO [RS:0;bfeb2336aed7:38995 {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,38995,1731505032992-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T13:37:13,491 INFO [RS:0;bfeb2336aed7:38995 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:37:13,491 INFO [RS:0;bfeb2336aed7:38995 {}] regionserver.Replication(171): bfeb2336aed7,38995,1731505032992 started 2024-11-13T13:37:13,506 INFO [RS:0;bfeb2336aed7:38995 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:37:13,506 INFO [RS:0;bfeb2336aed7:38995 {}] regionserver.HRegionServer(1482): Serving as bfeb2336aed7,38995,1731505032992, RpcServer on bfeb2336aed7/172.17.0.2:38995, sessionid=0x101346a09d80001 2024-11-13T13:37:13,506 DEBUG [RS:0;bfeb2336aed7:38995 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T13:37:13,506 DEBUG [RS:0;bfeb2336aed7:38995 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bfeb2336aed7,38995,1731505032992 2024-11-13T13:37:13,506 DEBUG [RS:0;bfeb2336aed7:38995 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bfeb2336aed7,38995,1731505032992' 2024-11-13T13:37:13,506 DEBUG [RS:0;bfeb2336aed7:38995 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T13:37:13,507 DEBUG [RS:0;bfeb2336aed7:38995 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T13:37:13,507 DEBUG [RS:0;bfeb2336aed7:38995 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T13:37:13,507 DEBUG [RS:0;bfeb2336aed7:38995 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T13:37:13,507 DEBUG [RS:0;bfeb2336aed7:38995 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager bfeb2336aed7,38995,1731505032992 2024-11-13T13:37:13,507 DEBUG [RS:0;bfeb2336aed7:38995 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bfeb2336aed7,38995,1731505032992' 2024-11-13T13:37:13,507 DEBUG [RS:0;bfeb2336aed7:38995 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T13:37:13,508 DEBUG 
[RS:0;bfeb2336aed7:38995 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T13:37:13,508 DEBUG [RS:0;bfeb2336aed7:38995 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T13:37:13,508 INFO [RS:0;bfeb2336aed7:38995 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T13:37:13,508 INFO [RS:0;bfeb2336aed7:38995 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-13T13:37:13,540 WARN [bfeb2336aed7:45355 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-13T13:37:13,610 INFO [RS:0;bfeb2336aed7:38995 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bfeb2336aed7%2C38995%2C1731505032992, suffix=, logDir=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/WALs/bfeb2336aed7,38995,1731505032992, archiveDir=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/oldWALs, maxLogs=32 2024-11-13T13:37:13,611 INFO [RS:0;bfeb2336aed7:38995 {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C38995%2C1731505032992.1731505033611 2024-11-13T13:37:13,625 INFO [RS:0;bfeb2336aed7:38995 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/WALs/bfeb2336aed7,38995,1731505032992/bfeb2336aed7%2C38995%2C1731505032992.1731505033611 2024-11-13T13:37:13,632 DEBUG [RS:0;bfeb2336aed7:38995 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41091:41091),(127.0.0.1/127.0.0.1:41057:41057)] 2024-11-13T13:37:13,790 DEBUG [bfeb2336aed7:45355 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-13T13:37:13,791 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=bfeb2336aed7,38995,1731505032992 2024-11-13T13:37:13,792 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bfeb2336aed7,38995,1731505032992, state=OPENING 2024-11-13T13:37:13,836 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T13:37:13,847 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38995-0x101346a09d80001, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:37:13,847 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:37:13,848 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T13:37:13,848 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T13:37:13,848 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T13:37:13,848 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=bfeb2336aed7,38995,1731505032992}] 2024-11-13T13:37:14,002 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T13:37:14,005 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44865, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T13:37:14,009 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T13:37:14,009 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T13:37:14,011 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bfeb2336aed7%2C38995%2C1731505032992.meta, suffix=.meta, logDir=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/WALs/bfeb2336aed7,38995,1731505032992, archiveDir=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/oldWALs, maxLogs=32 2024-11-13T13:37:14,012 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C38995%2C1731505032992.meta.1731505034012.meta 2024-11-13T13:37:14,016 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/WALs/bfeb2336aed7,38995,1731505032992/bfeb2336aed7%2C38995%2C1731505032992.meta.1731505034012.meta 2024-11-13T13:37:14,020 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41057:41057),(127.0.0.1/127.0.0.1:41091:41091)] 2024-11-13T13:37:14,021 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T13:37:14,022 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T13:37:14,022 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T13:37:14,022 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
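The coprocessor$1 attribute echoed in the meta descriptor ('|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|') is the pipe-separated spec that CoprocessorHost then reports above as "path null and priority 536870911": the first field (jar path) is empty, the second is the class name, and the third is the priority (536870911 = Integer.MAX_VALUE / 4, i.e. the system priority). A small parsing sketch under that reading; the class below is hypothetical and not HBase's own parser.

```java
// Hypothetical parser for the pipe-separated coprocessor spec seen in the
// table descriptor above: path|class|priority|args (leading path empty here).
public final class CoprocessorSpec {
  public static void main(String[] args) {
    String spec = "|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|";
    String[] parts = spec.split("\\|", -1);          // keep empty fields
    String path = parts[0].isEmpty() ? null : parts[0];
    String className = parts[1];
    int priority = Integer.parseInt(parts[2]);
    // Prints: null org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 536870911
    System.out.println(path + " " + className + " " + priority);
  }
}
```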
2024-11-13T13:37:14,022 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T13:37:14,022 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:37:14,022 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T13:37:14,022 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T13:37:14,023 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T13:37:14,024 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T13:37:14,024 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:37:14,024 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:37:14,025 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T13:37:14,025 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T13:37:14,025 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:37:14,026 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:37:14,026 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T13:37:14,027 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T13:37:14,027 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:37:14,027 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:37:14,027 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T13:37:14,028 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T13:37:14,028 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:37:14,028 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-13T13:37:14,028 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T13:37:14,029 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/hbase/meta/1588230740 2024-11-13T13:37:14,030 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/hbase/meta/1588230740 2024-11-13T13:37:14,031 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T13:37:14,031 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T13:37:14,032 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T13:37:14,033 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T13:37:14,033 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=705385, jitterRate=-0.10305763781070709}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T13:37:14,033 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T13:37:14,034 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731505034022Writing region info on filesystem at 1731505034022Initializing all the Stores at 1731505034023 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731505034023Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731505034023Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731505034023Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731505034023Cleaning up temporary data from old regions at 1731505034031 (+8 ms)Running coprocessor post-open hooks at 1731505034033 (+2 ms)Region opened successfully at 1731505034034 (+1 ms) 2024-11-13T13:37:14,035 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731505034002 2024-11-13T13:37:14,036 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T13:37:14,037 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T13:37:14,037 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=bfeb2336aed7,38995,1731505032992 2024-11-13T13:37:14,038 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bfeb2336aed7,38995,1731505032992, state=OPEN 2024-11-13T13:37:14,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38995-0x101346a09d80001, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T13:37:14,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T13:37:14,076 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=bfeb2336aed7,38995,1731505032992 2024-11-13T13:37:14,076 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T13:37:14,076 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T13:37:14,081 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T13:37:14,081 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=bfeb2336aed7,38995,1731505032992 in 228 msec 2024-11-13T13:37:14,085 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-13T13:37:14,085 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 694 msec 2024-11-13T13:37:14,087 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T13:37:14,087 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T13:37:14,089 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T13:37:14,089 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bfeb2336aed7,38995,1731505032992, seqNum=-1] 2024-11-13T13:37:14,090 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T13:37:14,091 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35813, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T13:37:14,098 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 751 msec 2024-11-13T13:37:14,099 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731505034099, completionTime=-1 2024-11-13T13:37:14,099 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-13T13:37:14,099 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-13T13:37:14,101 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-13T13:37:14,101 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731505094101 2024-11-13T13:37:14,101 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731505154101 2024-11-13T13:37:14,101 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-13T13:37:14,102 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,45355,1731505032764-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T13:37:14,102 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,45355,1731505032764-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:37:14,102 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,45355,1731505032764-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:37:14,102 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-bfeb2336aed7:45355, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T13:37:14,102 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-13T13:37:14,102 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T13:37:14,104 DEBUG [master/bfeb2336aed7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T13:37:14,107 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.050sec 2024-11-13T13:37:14,107 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T13:37:14,107 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T13:37:14,107 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-13T13:37:14,107 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-13T13:37:14,107 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T13:37:14,107 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,45355,1731505032764-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T13:37:14,107 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,45355,1731505032764-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T13:37:14,111 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T13:37:14,111 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T13:37:14,111 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,45355,1731505032764-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T13:37:14,111 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@783c51c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T13:37:14,111 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request bfeb2336aed7,45355,-1 for getting cluster id 2024-11-13T13:37:14,111 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T13:37:14,113 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4e00270f-4572-4526-8602-bb6d1180698f' 2024-11-13T13:37:14,113 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T13:37:14,114 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4e00270f-4572-4526-8602-bb6d1180698f" 2024-11-13T13:37:14,114 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@539afd42, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T13:37:14,114 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [bfeb2336aed7,45355,-1] 2024-11-13T13:37:14,114 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T13:37:14,115 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:37:14,116 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47398, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T13:37:14,117 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6182ce6d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T13:37:14,118 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T13:37:14,119 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bfeb2336aed7,38995,1731505032992, seqNum=-1] 2024-11-13T13:37:14,119 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T13:37:14,120 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34298, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T13:37:14,122 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=bfeb2336aed7,45355,1731505032764 2024-11-13T13:37:14,122 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:37:14,125 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-13T13:37:14,125 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-13T13:37:14,126 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is bfeb2336aed7,45355,1731505032764 2024-11-13T13:37:14,126 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@60a433c8 2024-11-13T13:37:14,126 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-13T13:37:14,127 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47406, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-13T13:37:14,128 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45355 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-13T13:37:14,128 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45355 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-13T13:37:14,128 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45355 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T13:37:14,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45355 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-13T13:37:14,130 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T13:37:14,130 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:37:14,130 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45355 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-13T13:37:14,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45355 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T13:37:14,131 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T13:37:14,138 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741835_1011 (size=381) 2024-11-13T13:37:14,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741835_1011 (size=381) 2024-11-13T13:37:14,140 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 56afd5c2ff9ec93f3af082ccede46aee, NAME => 'TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d 2024-11-13T13:37:14,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741836_1012 (size=64) 2024-11-13T13:37:14,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741836_1012 (size=64) 2024-11-13T13:37:14,145 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:14,145 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:14,146 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:37:14,146 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 56afd5c2ff9ec93f3af082ccede46aee, disabling compactions & flushes 2024-11-13T13:37:14,146 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee. 2024-11-13T13:37:14,146 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee. 
2024-11-13T13:37:14,146 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee. after waiting 0 ms 2024-11-13T13:37:14,146 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee. 2024-11-13T13:37:14,146 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee. 2024-11-13T13:37:14,146 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 56afd5c2ff9ec93f3af082ccede46aee: Waiting for close lock at 1731505034146Disabling compacts and flushes for region at 1731505034146Disabling writes for close at 1731505034146Writing region close event to WAL at 1731505034146Closed at 1731505034146 2024-11-13T13:37:14,147 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T13:37:14,147 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731505034147"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731505034147"}]},"ts":"1731505034147"} 2024-11-13T13:37:14,150 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-13T13:37:14,152 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T13:37:14,152 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731505034152"}]},"ts":"1731505034152"} 2024-11-13T13:37:14,154 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-13T13:37:14,155 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=56afd5c2ff9ec93f3af082ccede46aee, ASSIGN}] 2024-11-13T13:37:14,156 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=56afd5c2ff9ec93f3af082ccede46aee, ASSIGN 2024-11-13T13:37:14,157 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=56afd5c2ff9ec93f3af082ccede46aee, ASSIGN; state=OFFLINE, location=bfeb2336aed7,38995,1731505032992; forceNewPlan=false, retain=false 2024-11-13T13:37:14,308 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta 
row=56afd5c2ff9ec93f3af082ccede46aee, regionState=OPENING, regionLocation=bfeb2336aed7,38995,1731505032992 2024-11-13T13:37:14,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=56afd5c2ff9ec93f3af082ccede46aee, ASSIGN because future has completed 2024-11-13T13:37:14,313 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 56afd5c2ff9ec93f3af082ccede46aee, server=bfeb2336aed7,38995,1731505032992}] 2024-11-13T13:37:14,471 INFO [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee. 2024-11-13T13:37:14,471 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 56afd5c2ff9ec93f3af082ccede46aee, NAME => 'TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee.', STARTKEY => '', ENDKEY => ''} 2024-11-13T13:37:14,471 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 56afd5c2ff9ec93f3af082ccede46aee 2024-11-13T13:37:14,471 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:37:14,471 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 56afd5c2ff9ec93f3af082ccede46aee 2024-11-13T13:37:14,472 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 56afd5c2ff9ec93f3af082ccede46aee 2024-11-13T13:37:14,473 INFO [StoreOpener-56afd5c2ff9ec93f3af082ccede46aee-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 56afd5c2ff9ec93f3af082ccede46aee 2024-11-13T13:37:14,475 INFO [StoreOpener-56afd5c2ff9ec93f3af082ccede46aee-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 56afd5c2ff9ec93f3af082ccede46aee columnFamilyName info 2024-11-13T13:37:14,475 DEBUG [StoreOpener-56afd5c2ff9ec93f3af082ccede46aee-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:37:14,476 INFO [StoreOpener-56afd5c2ff9ec93f3af082ccede46aee-1 {}] regionserver.HStore(327): Store=56afd5c2ff9ec93f3af082ccede46aee/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T13:37:14,476 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 56afd5c2ff9ec93f3af082ccede46aee 2024-11-13T13:37:14,477 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee 2024-11-13T13:37:14,478 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee 2024-11-13T13:37:14,478 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 56afd5c2ff9ec93f3af082ccede46aee 2024-11-13T13:37:14,478 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 56afd5c2ff9ec93f3af082ccede46aee 2024-11-13T13:37:14,480 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 56afd5c2ff9ec93f3af082ccede46aee 2024-11-13T13:37:14,482 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T13:37:14,483 INFO [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 56afd5c2ff9ec93f3af082ccede46aee; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=847078, jitterRate=0.07711638510227203}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T13:37:14,483 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 56afd5c2ff9ec93f3af082ccede46aee 2024-11-13T13:37:14,483 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 56afd5c2ff9ec93f3af082ccede46aee: Running coprocessor pre-open hook at 1731505034472Writing region info on filesystem at 1731505034472Initializing all the Stores at 1731505034473 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731505034473Cleaning up temporary data from old regions at 1731505034478 (+5 ms)Running coprocessor post-open hooks at 1731505034483 (+5 ms)Region opened successfully at 1731505034483 2024-11-13T13:37:14,484 INFO [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee., pid=6, masterSystemTime=1731505034466 2024-11-13T13:37:14,487 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee. 2024-11-13T13:37:14,487 INFO [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee. 2024-11-13T13:37:14,488 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=56afd5c2ff9ec93f3af082ccede46aee, regionState=OPEN, openSeqNum=2, regionLocation=bfeb2336aed7,38995,1731505032992 2024-11-13T13:37:14,492 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 56afd5c2ff9ec93f3af082ccede46aee, server=bfeb2336aed7,38995,1731505032992 because future has completed 2024-11-13T13:37:14,497 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-13T13:37:14,497 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 56afd5c2ff9ec93f3af082ccede46aee, server=bfeb2336aed7,38995,1731505032992 in 181 msec 2024-11-13T13:37:14,501 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-13T13:37:14,501 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=56afd5c2ff9ec93f3af082ccede46aee, ASSIGN in 342 msec 2024-11-13T13:37:14,502 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T13:37:14,502 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731505034502"}]},"ts":"1731505034502"} 2024-11-13T13:37:14,505 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-13T13:37:14,506 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T13:37:14,508 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 378 msec 2024-11-13T13:37:14,914 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while 
metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:14,914 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:14,914 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:14,914 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:14,915 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:14,915 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:14,916 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:14,916 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:14,939 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:14,939 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:14,939 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:14,939 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:14,939 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:14,940 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:14,944 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:14,944 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:14,944 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:14,946 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:15,146 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:15,146 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:15,451 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T13:37:15,452 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:15,452 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:15,453 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:15,453 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:15,453 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:15,453 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:15,454 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:15,454 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:15,488 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:15,489 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:15,489 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:15,489 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:15,489 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:15,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:15,496 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:15,497 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:15,497 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:15,500 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:16,146 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:16,146 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:17,147 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:17,147 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:18,148 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:18,148 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:19,149 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:19,149 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:19,472 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-13T13:37:19,472 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-13T13:37:20,149 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:20,149 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:21,150 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:21,150 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:21,948 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T13:37:21,948 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-13T13:37:21,948 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T13:37:21,948 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-13T13:37:21,948 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-13T13:37:21,948 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-13T13:37:21,949 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-13T13:37:21,949 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-13T13:37:22,151 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): 
Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:22,151 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:23,151 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:37:23,151 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:24,152 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:24,152 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:37:24,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45355 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T13:37:24,182 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-13T13:37:24,182 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-13T13:37:24,185 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-13T13:37:24,185 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee. 2024-11-13T13:37:24,188 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee., hostname=bfeb2336aed7,38995,1731505032992, seqNum=2] 2024-11-13T13:37:24,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38995 {}] regionserver.HRegion(8855): Flush requested on 56afd5c2ff9ec93f3af082ccede46aee 2024-11-13T13:37:24,203 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 56afd5c2ff9ec93f3af082ccede46aee 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T13:37:24,223 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/.tmp/info/c789ae7d7ca34c8b92aff50ae03e2543 is 1080, key is row0001/info:/1731505044189/Put/seqid=0 2024-11-13T13:37:24,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741837_1013 (size=12509) 2024-11-13T13:37:24,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741837_1013 (size=12509) 2024-11-13T13:37:24,236 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/.tmp/info/c789ae7d7ca34c8b92aff50ae03e2543 2024-11-13T13:37:24,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/.tmp/info/c789ae7d7ca34c8b92aff50ae03e2543 as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/c789ae7d7ca34c8b92aff50ae03e2543 2024-11-13T13:37:24,250 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/c789ae7d7ca34c8b92aff50ae03e2543, entries=7, sequenceid=11, filesize=12.2 K 
2024-11-13T13:37:24,251 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=22.07 KB/22596 for 56afd5c2ff9ec93f3af082ccede46aee in 48ms, sequenceid=11, compaction requested=false 2024-11-13T13:37:24,251 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 56afd5c2ff9ec93f3af082ccede46aee: 2024-11-13T13:37:24,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38995 {}] regionserver.HRegion(8855): Flush requested on 56afd5c2ff9ec93f3af082ccede46aee 2024-11-13T13:37:24,252 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 56afd5c2ff9ec93f3af082ccede46aee 1/1 column families, dataSize=23.12 KB heapSize=25 KB 2024-11-13T13:37:24,257 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/.tmp/info/22b50d1ed7ce4e429c2de64443ccc6f3 is 1080, key is row0008/info:/1731505044204/Put/seqid=0 2024-11-13T13:37:24,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741838_1014 (size=28684) 2024-11-13T13:37:24,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741838_1014 (size=28684) 2024-11-13T13:37:24,266 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=23.12 KB at sequenceid=36 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/.tmp/info/22b50d1ed7ce4e429c2de64443ccc6f3 2024-11-13T13:37:24,272 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/.tmp/info/22b50d1ed7ce4e429c2de64443ccc6f3 as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/22b50d1ed7ce4e429c2de64443ccc6f3 2024-11-13T13:37:24,278 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/22b50d1ed7ce4e429c2de64443ccc6f3, entries=22, sequenceid=36, filesize=28.0 K 2024-11-13T13:37:24,279 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.12 KB/23672, heapSize ~24.98 KB/25584, currentSize=3.15 KB/3228 for 56afd5c2ff9ec93f3af082ccede46aee in 27ms, sequenceid=36, compaction requested=false 2024-11-13T13:37:24,279 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 56afd5c2ff9ec93f3af082ccede46aee: 2024-11-13T13:37:24,279 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.2 K, sizeToCheck=16.0 K 2024-11-13T13:37:24,279 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T13:37:24,279 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/22b50d1ed7ce4e429c2de64443ccc6f3 because midkey is the same as first or last row 2024-11-13T13:37:25,153 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:25,153 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:26,153 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:26,154 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:37:26,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38995 {}] regionserver.HRegion(8855): Flush requested on 56afd5c2ff9ec93f3af082ccede46aee 2024-11-13T13:37:26,271 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 56afd5c2ff9ec93f3af082ccede46aee 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T13:37:26,277 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/.tmp/info/07e5022afec140daa33d876227ce36ff is 1080, key is row0030/info:/1731505044254/Put/seqid=0 2024-11-13T13:37:26,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741839_1015 (size=12509) 2024-11-13T13:37:26,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741839_1015 (size=12509) 2024-11-13T13:37:26,292 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=46 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/.tmp/info/07e5022afec140daa33d876227ce36ff 2024-11-13T13:37:26,298 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/.tmp/info/07e5022afec140daa33d876227ce36ff as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/07e5022afec140daa33d876227ce36ff 2024-11-13T13:37:26,304 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/07e5022afec140daa33d876227ce36ff, entries=7, sequenceid=46, filesize=12.2 K 2024-11-13T13:37:26,306 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for 56afd5c2ff9ec93f3af082ccede46aee in 34ms, sequenceid=46, compaction requested=true 2024-11-13T13:37:26,306 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 56afd5c2ff9ec93f3af082ccede46aee: 2024-11-13T13:37:26,306 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=52.4 K, sizeToCheck=16.0 K 2024-11-13T13:37:26,306 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T13:37:26,306 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/22b50d1ed7ce4e429c2de64443ccc6f3 because midkey is the same as first or last row 2024-11-13T13:37:26,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38995 {}] regionserver.HRegion(8855): Flush requested on 56afd5c2ff9ec93f3af082ccede46aee 2024-11-13T13:37:26,312 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 56afd5c2ff9ec93f3af082ccede46aee:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T13:37:26,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T13:37:26,312 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T13:37:26,312 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 56afd5c2ff9ec93f3af082ccede46aee 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-13T13:37:26,314 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 53702 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T13:37:26,314 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1541): 56afd5c2ff9ec93f3af082ccede46aee/info is initiating minor compaction (all files) 2024-11-13T13:37:26,314 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 56afd5c2ff9ec93f3af082ccede46aee/info in TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee. 2024-11-13T13:37:26,314 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/c789ae7d7ca34c8b92aff50ae03e2543, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/22b50d1ed7ce4e429c2de64443ccc6f3, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/07e5022afec140daa33d876227ce36ff] into tmpdir=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/.tmp, totalSize=52.4 K 2024-11-13T13:37:26,315 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.Compactor(225): Compacting c789ae7d7ca34c8b92aff50ae03e2543, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731505044189 2024-11-13T13:37:26,316 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.Compactor(225): Compacting 22b50d1ed7ce4e429c2de64443ccc6f3, keycount=22, bloomtype=ROW, size=28.0 K, encoding=NONE, compression=NONE, seqNum=36, earliestPutTs=1731505044204 2024-11-13T13:37:26,316 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.Compactor(225): Compacting 07e5022afec140daa33d876227ce36ff, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=46, earliestPutTs=1731505044254 2024-11-13T13:37:26,318 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/.tmp/info/095bf480a775403bb518ae385dab88fd is 1080, key is row0037/info:/1731505046273/Put/seqid=0 
2024-11-13T13:37:26,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741840_1016 (size=22222) 2024-11-13T13:37:26,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741840_1016 (size=22222) 2024-11-13T13:37:26,331 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 56afd5c2ff9ec93f3af082ccede46aee#info#compaction#59 average throughput is 12.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T13:37:26,332 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/.tmp/info/684c98a5a9084e19a221ef16580b663d is 1080, key is row0001/info:/1731505044189/Put/seqid=0 2024-11-13T13:37:26,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741841_1017 (size=43901) 2024-11-13T13:37:26,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741841_1017 (size=43901) 2024-11-13T13:37:26,347 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/.tmp/info/684c98a5a9084e19a221ef16580b663d as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/684c98a5a9084e19a221ef16580b663d 2024-11-13T13:37:26,356 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 56afd5c2ff9ec93f3af082ccede46aee/info of 56afd5c2ff9ec93f3af082ccede46aee into 684c98a5a9084e19a221ef16580b663d(size=42.9 K), total size for store is 42.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-13T13:37:26,356 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 56afd5c2ff9ec93f3af082ccede46aee: 2024-11-13T13:37:26,357 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee., storeName=56afd5c2ff9ec93f3af082ccede46aee/info, priority=13, startTime=1731505046306; duration=0sec 2024-11-13T13:37:26,357 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=42.9 K, sizeToCheck=16.0 K 2024-11-13T13:37:26,357 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T13:37:26,357 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/684c98a5a9084e19a221ef16580b663d because midkey is the same as first or last row 2024-11-13T13:37:26,357 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=42.9 K, sizeToCheck=16.0 K 2024-11-13T13:37:26,357 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T13:37:26,357 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/684c98a5a9084e19a221ef16580b663d because midkey is the same as first or last row 2024-11-13T13:37:26,357 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=42.9 K, sizeToCheck=16.0 K 2024-11-13T13:37:26,357 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T13:37:26,357 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/684c98a5a9084e19a221ef16580b663d because midkey is the same as first or last row 2024-11-13T13:37:26,357 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T13:37:26,357 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 56afd5c2ff9ec93f3af082ccede46aee:info 2024-11-13T13:37:26,725 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/.tmp/info/095bf480a775403bb518ae385dab88fd 2024-11-13T13:37:26,732 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/.tmp/info/095bf480a775403bb518ae385dab88fd as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/095bf480a775403bb518ae385dab88fd 2024-11-13T13:37:26,737 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/095bf480a775403bb518ae385dab88fd, entries=16, sequenceid=65, filesize=21.7 K 2024-11-13T13:37:26,738 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=12.61 KB/12912 for 56afd5c2ff9ec93f3af082ccede46aee in 426ms, sequenceid=65, compaction requested=false 2024-11-13T13:37:26,738 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 56afd5c2ff9ec93f3af082ccede46aee: 2024-11-13T13:37:26,738 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=64.6 K, sizeToCheck=16.0 K 2024-11-13T13:37:26,738 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T13:37:26,738 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/684c98a5a9084e19a221ef16580b663d because midkey is the same as first or last row 2024-11-13T13:37:27,154 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:27,154 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:28,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:28,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:28,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38995 {}] regionserver.HRegion(8855): Flush requested on 56afd5c2ff9ec93f3af082ccede46aee 2024-11-13T13:37:28,360 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 56afd5c2ff9ec93f3af082ccede46aee 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-13T13:37:28,364 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/.tmp/info/f83aca41976a4defa398c710425c1873 is 1080, key is row0053/info:/1731505046315/Put/seqid=0 2024-11-13T13:37:28,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741842_1018 (size=18987) 2024-11-13T13:37:28,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741842_1018 (size=18987) 2024-11-13T13:37:28,394 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38995 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=56afd5c2ff9ec93f3af082ccede46aee, server=bfeb2336aed7,38995,1731505032992 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-13T13:37:28,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38995 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:34298 deadline: 1731505058393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=56afd5c2ff9ec93f3af082ccede46aee, server=bfeb2336aed7,38995,1731505032992 2024-11-13T13:37:28,422 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee., hostname=bfeb2336aed7,38995,1731505032992, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee., hostname=bfeb2336aed7,38995,1731505032992, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=56afd5c2ff9ec93f3af082ccede46aee, server=bfeb2336aed7,38995,1731505032992 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T13:37:28,423 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee., hostname=bfeb2336aed7,38995,1731505032992, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=56afd5c2ff9ec93f3af082ccede46aee, server=bfeb2336aed7,38995,1731505032992 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T13:37:28,423 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee., hostname=bfeb2336aed7,38995,1731505032992, seqNum=2 because the exception is null or not the one we care about 2024-11-13T13:37:28,770 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/.tmp/info/f83aca41976a4defa398c710425c1873 2024-11-13T13:37:28,775 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/.tmp/info/f83aca41976a4defa398c710425c1873 as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/f83aca41976a4defa398c710425c1873 2024-11-13T13:37:28,780 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/f83aca41976a4defa398c710425c1873, entries=13, sequenceid=82, filesize=18.5 K 2024-11-13T13:37:28,781 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=16.81 KB/17216 for 56afd5c2ff9ec93f3af082ccede46aee in 421ms, sequenceid=82, compaction requested=true 2024-11-13T13:37:28,781 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 56afd5c2ff9ec93f3af082ccede46aee: 2024-11-13T13:37:28,781 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-11-13T13:37:28,781 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T13:37:28,782 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/684c98a5a9084e19a221ef16580b663d because midkey is the same as first or last row 2024-11-13T13:37:28,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 56afd5c2ff9ec93f3af082ccede46aee:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T13:37:28,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T13:37:28,782 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting 
compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T13:37:28,783 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85110 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T13:37:28,783 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1541): 56afd5c2ff9ec93f3af082ccede46aee/info is initiating minor compaction (all files) 2024-11-13T13:37:28,783 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 56afd5c2ff9ec93f3af082ccede46aee/info in TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee. 2024-11-13T13:37:28,783 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/684c98a5a9084e19a221ef16580b663d, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/095bf480a775403bb518ae385dab88fd, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/f83aca41976a4defa398c710425c1873] into tmpdir=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/.tmp, totalSize=83.1 K 2024-11-13T13:37:28,783 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.Compactor(225): Compacting 684c98a5a9084e19a221ef16580b663d, keycount=36, bloomtype=ROW, size=42.9 K, encoding=NONE, compression=NONE, seqNum=46, earliestPutTs=1731505044189 2024-11-13T13:37:28,784 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.Compactor(225): Compacting 095bf480a775403bb518ae385dab88fd, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=65, earliestPutTs=1731505046273 2024-11-13T13:37:28,784 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.Compactor(225): Compacting f83aca41976a4defa398c710425c1873, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731505046315 2024-11-13T13:37:28,796 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 56afd5c2ff9ec93f3af082ccede46aee#info#compaction#61 average throughput is 22.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T13:37:28,797 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/.tmp/info/5ae30c907d014a109e71cc014ace382b is 1080, key is row0001/info:/1731505044189/Put/seqid=0 2024-11-13T13:37:28,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741843_1019 (size=75378) 2024-11-13T13:37:28,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741843_1019 (size=75378) 2024-11-13T13:37:28,807 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/.tmp/info/5ae30c907d014a109e71cc014ace382b as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/5ae30c907d014a109e71cc014ace382b 2024-11-13T13:37:28,813 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 56afd5c2ff9ec93f3af082ccede46aee/info of 56afd5c2ff9ec93f3af082ccede46aee into 5ae30c907d014a109e71cc014ace382b(size=73.6 K), total size for store is 73.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T13:37:28,813 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 56afd5c2ff9ec93f3af082ccede46aee: 2024-11-13T13:37:28,813 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee., storeName=56afd5c2ff9ec93f3af082ccede46aee/info, priority=13, startTime=1731505048782; duration=0sec 2024-11-13T13:37:28,813 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.6 K, sizeToCheck=16.0 K 2024-11-13T13:37:28,813 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T13:37:28,814 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.6 K, sizeToCheck=16.0 K 2024-11-13T13:37:28,814 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T13:37:28,814 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.6 K, sizeToCheck=16.0 K 2024-11-13T13:37:28,814 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T13:37:28,815 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee., 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T13:37:28,815 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T13:37:28,815 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 56afd5c2ff9ec93f3af082ccede46aee:info 2024-11-13T13:37:28,816 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45355 {}] assignment.AssignmentManager(1355): Split request from bfeb2336aed7,38995,1731505032992, parent={ENCODED => 56afd5c2ff9ec93f3af082ccede46aee, NAME => 'TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-13T13:37:28,821 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45355 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=bfeb2336aed7,38995,1731505032992 2024-11-13T13:37:28,825 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45355 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=56afd5c2ff9ec93f3af082ccede46aee, daughterA=0f561145611ba79a8558052544d3d0a2, daughterB=ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:37:28,826 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=56afd5c2ff9ec93f3af082ccede46aee, daughterA=0f561145611ba79a8558052544d3d0a2, daughterB=ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:37:28,826 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=56afd5c2ff9ec93f3af082ccede46aee, daughterA=0f561145611ba79a8558052544d3d0a2, daughterB=ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:37:28,826 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=56afd5c2ff9ec93f3af082ccede46aee, daughterA=0f561145611ba79a8558052544d3d0a2, daughterB=ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:37:28,834 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=56afd5c2ff9ec93f3af082ccede46aee, UNASSIGN}] 2024-11-13T13:37:28,835 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=56afd5c2ff9ec93f3af082ccede46aee, UNASSIGN 2024-11-13T13:37:28,837 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=56afd5c2ff9ec93f3af082ccede46aee, regionState=CLOSING, regionLocation=bfeb2336aed7,38995,1731505032992 2024-11-13T13:37:28,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, 
hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=56afd5c2ff9ec93f3af082ccede46aee, UNASSIGN because future has completed 2024-11-13T13:37:28,840 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-13T13:37:28,840 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 56afd5c2ff9ec93f3af082ccede46aee, server=bfeb2336aed7,38995,1731505032992}] 2024-11-13T13:37:28,997 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 56afd5c2ff9ec93f3af082ccede46aee 2024-11-13T13:37:28,997 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-13T13:37:28,998 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 56afd5c2ff9ec93f3af082ccede46aee, disabling compactions & flushes 2024-11-13T13:37:28,998 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee. 2024-11-13T13:37:28,998 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee. 2024-11-13T13:37:28,998 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee. after waiting 0 ms 2024-11-13T13:37:28,998 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee. 
2024-11-13T13:37:28,998 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 56afd5c2ff9ec93f3af082ccede46aee 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-13T13:37:29,004 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/.tmp/info/da5c57994b174d6796355e16ac02b6e2 is 1080, key is row0066/info:/1731505048361/Put/seqid=0 2024-11-13T13:37:29,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741844_1020 (size=22222) 2024-11-13T13:37:29,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741844_1020 (size=22222) 2024-11-13T13:37:29,011 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=102 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/.tmp/info/da5c57994b174d6796355e16ac02b6e2 2024-11-13T13:37:29,017 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/.tmp/info/da5c57994b174d6796355e16ac02b6e2 as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/da5c57994b174d6796355e16ac02b6e2 2024-11-13T13:37:29,023 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/da5c57994b174d6796355e16ac02b6e2, entries=16, sequenceid=102, filesize=21.7 K 2024-11-13T13:37:29,024 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=0 B/0 for 56afd5c2ff9ec93f3af082ccede46aee in 26ms, sequenceid=102, compaction requested=false 2024-11-13T13:37:29,025 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/c789ae7d7ca34c8b92aff50ae03e2543, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/22b50d1ed7ce4e429c2de64443ccc6f3, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/684c98a5a9084e19a221ef16580b663d, 
hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/07e5022afec140daa33d876227ce36ff, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/095bf480a775403bb518ae385dab88fd, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/f83aca41976a4defa398c710425c1873] to archive 2024-11-13T13:37:29,026 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-13T13:37:29,027 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/c789ae7d7ca34c8b92aff50ae03e2543 to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/c789ae7d7ca34c8b92aff50ae03e2543 2024-11-13T13:37:29,029 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/22b50d1ed7ce4e429c2de64443ccc6f3 to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/22b50d1ed7ce4e429c2de64443ccc6f3 2024-11-13T13:37:29,030 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/684c98a5a9084e19a221ef16580b663d to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/684c98a5a9084e19a221ef16580b663d 2024-11-13T13:37:29,031 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/07e5022afec140daa33d876227ce36ff to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/07e5022afec140daa33d876227ce36ff 2024-11-13T13:37:29,032 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/095bf480a775403bb518ae385dab88fd to 
hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/095bf480a775403bb518ae385dab88fd 2024-11-13T13:37:29,033 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/f83aca41976a4defa398c710425c1873 to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/f83aca41976a4defa398c710425c1873 2024-11-13T13:37:29,040 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/recovered.edits/105.seqid, newMaxSeqId=105, maxSeqId=1 2024-11-13T13:37:29,041 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee. 2024-11-13T13:37:29,041 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 56afd5c2ff9ec93f3af082ccede46aee: Waiting for close lock at 1731505048998Running coprocessor pre-close hooks at 1731505048998Disabling compacts and flushes for region at 1731505048998Disabling writes for close at 1731505048998Obtaining lock to block concurrent updates at 1731505048998Preparing flush snapshotting stores in 56afd5c2ff9ec93f3af082ccede46aee at 1731505048998Finished memstore snapshotting TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee., syncing WAL and waiting on mvcc, flushsize=dataSize=17216, getHeapSize=18672, getOffHeapSize=0, getCellsCount=16 at 1731505048999 (+1 ms)Flushing stores of TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee. 
at 1731505048999Flushing 56afd5c2ff9ec93f3af082ccede46aee/info: creating writer at 1731505048999Flushing 56afd5c2ff9ec93f3af082ccede46aee/info: appending metadata at 1731505049003 (+4 ms)Flushing 56afd5c2ff9ec93f3af082ccede46aee/info: closing flushed file at 1731505049003Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6db87672: reopening flushed file at 1731505049016 (+13 ms)Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=0 B/0 for 56afd5c2ff9ec93f3af082ccede46aee in 26ms, sequenceid=102, compaction requested=false at 1731505049024 (+8 ms)Writing region close event to WAL at 1731505049036 (+12 ms)Running coprocessor post-close hooks at 1731505049040 (+4 ms)Closed at 1731505049040 2024-11-13T13:37:29,043 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 56afd5c2ff9ec93f3af082ccede46aee 2024-11-13T13:37:29,044 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=56afd5c2ff9ec93f3af082ccede46aee, regionState=CLOSED 2024-11-13T13:37:29,046 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 56afd5c2ff9ec93f3af082ccede46aee, server=bfeb2336aed7,38995,1731505032992 because future has completed 2024-11-13T13:37:29,050 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-13T13:37:29,050 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 56afd5c2ff9ec93f3af082ccede46aee, server=bfeb2336aed7,38995,1731505032992 in 208 msec 2024-11-13T13:37:29,053 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-13T13:37:29,053 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=56afd5c2ff9ec93f3af082ccede46aee, UNASSIGN in 217 msec 2024-11-13T13:37:29,062 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:37:29,065 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 2 storefiles, region=56afd5c2ff9ec93f3af082ccede46aee, threads=2 2024-11-13T13:37:29,067 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/da5c57994b174d6796355e16ac02b6e2 for region: 56afd5c2ff9ec93f3af082ccede46aee 2024-11-13T13:37:29,067 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/5ae30c907d014a109e71cc014ace382b for region: 56afd5c2ff9ec93f3af082ccede46aee 2024-11-13T13:37:29,078 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for 
hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/da5c57994b174d6796355e16ac02b6e2, top=true 2024-11-13T13:37:29,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741845_1021 (size=27) 2024-11-13T13:37:29,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741845_1021 (size=27) 2024-11-13T13:37:29,111 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/TestLogRolling-testLogRolling=56afd5c2ff9ec93f3af082ccede46aee-da5c57994b174d6796355e16ac02b6e2 for child: ef17de14e77e88401d31bb1a26d8cd11, parent: 56afd5c2ff9ec93f3af082ccede46aee 2024-11-13T13:37:29,111 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/da5c57994b174d6796355e16ac02b6e2 for region: 56afd5c2ff9ec93f3af082ccede46aee 2024-11-13T13:37:29,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741846_1022 (size=27) 2024-11-13T13:37:29,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741846_1022 (size=27) 2024-11-13T13:37:29,122 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/5ae30c907d014a109e71cc014ace382b for region: 56afd5c2ff9ec93f3af082ccede46aee 2024-11-13T13:37:29,125 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 56afd5c2ff9ec93f3af082ccede46aee Daughter A: [hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/0f561145611ba79a8558052544d3d0a2/info/5ae30c907d014a109e71cc014ace382b.56afd5c2ff9ec93f3af082ccede46aee] storefiles, Daughter B: [hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/5ae30c907d014a109e71cc014ace382b.56afd5c2ff9ec93f3af082ccede46aee, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/TestLogRolling-testLogRolling=56afd5c2ff9ec93f3af082ccede46aee-da5c57994b174d6796355e16ac02b6e2] storefiles. 
2024-11-13T13:37:29,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741847_1023 (size=71) 2024-11-13T13:37:29,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741847_1023 (size=71) 2024-11-13T13:37:29,135 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:37:29,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741848_1024 (size=71) 2024-11-13T13:37:29,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741848_1024 (size=71) 2024-11-13T13:37:29,149 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:37:29,156 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:37:29,156 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:37:29,158 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/0f561145611ba79a8558052544d3d0a2/recovered.edits/105.seqid, newMaxSeqId=105, maxSeqId=-1 2024-11-13T13:37:29,160 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/recovered.edits/105.seqid, newMaxSeqId=105, maxSeqId=-1 2024-11-13T13:37:29,162 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731505049162"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731505049162"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731505049162"}]},"ts":"1731505049162"} 2024-11-13T13:37:29,162 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731505048821.0f561145611ba79a8558052544d3d0a2.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731505049162"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731505049162"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731505049162"}]},"ts":"1731505049162"} 2024-11-13T13:37:29,163 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731505049162"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731505049162"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731505049162"}]},"ts":"1731505049162"} 2024-11-13T13:37:29,180 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0f561145611ba79a8558052544d3d0a2, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ef17de14e77e88401d31bb1a26d8cd11, ASSIGN}] 2024-11-13T13:37:29,182 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0f561145611ba79a8558052544d3d0a2, ASSIGN 2024-11-13T13:37:29,182 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ef17de14e77e88401d31bb1a26d8cd11, ASSIGN 2024-11-13T13:37:29,183 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0f561145611ba79a8558052544d3d0a2, ASSIGN; state=SPLITTING_NEW, location=bfeb2336aed7,38995,1731505032992; forceNewPlan=false, retain=false 2024-11-13T13:37:29,183 INFO [PEWorker-5 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ef17de14e77e88401d31bb1a26d8cd11, ASSIGN; state=SPLITTING_NEW, location=bfeb2336aed7,38995,1731505032992; forceNewPlan=false, retain=false 2024-11-13T13:37:29,333 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=ef17de14e77e88401d31bb1a26d8cd11, regionState=OPENING, regionLocation=bfeb2336aed7,38995,1731505032992 2024-11-13T13:37:29,333 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=0f561145611ba79a8558052544d3d0a2, regionState=OPENING, regionLocation=bfeb2336aed7,38995,1731505032992 2024-11-13T13:37:29,336 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ef17de14e77e88401d31bb1a26d8cd11, ASSIGN because future has completed 2024-11-13T13:37:29,337 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure ef17de14e77e88401d31bb1a26d8cd11, server=bfeb2336aed7,38995,1731505032992}] 2024-11-13T13:37:29,338 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0f561145611ba79a8558052544d3d0a2, ASSIGN because future has completed 2024-11-13T13:37:29,339 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0f561145611ba79a8558052544d3d0a2, server=bfeb2336aed7,38995,1731505032992}] 2024-11-13T13:37:29,496 INFO [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11. 
2024-11-13T13:37:29,496 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => ef17de14e77e88401d31bb1a26d8cd11, NAME => 'TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-13T13:37:29,496 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:37:29,496 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:37:29,497 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:37:29,497 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:37:29,499 INFO [StoreOpener-ef17de14e77e88401d31bb1a26d8cd11-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:37:29,500 INFO [StoreOpener-ef17de14e77e88401d31bb1a26d8cd11-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ef17de14e77e88401d31bb1a26d8cd11 columnFamilyName info 2024-11-13T13:37:29,500 DEBUG [StoreOpener-ef17de14e77e88401d31bb1a26d8cd11-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:37:29,512 DEBUG [StoreOpener-ef17de14e77e88401d31bb1a26d8cd11-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/5ae30c907d014a109e71cc014ace382b.56afd5c2ff9ec93f3af082ccede46aee->hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/5ae30c907d014a109e71cc014ace382b-top 2024-11-13T13:37:29,518 DEBUG [StoreOpener-ef17de14e77e88401d31bb1a26d8cd11-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/TestLogRolling-testLogRolling=56afd5c2ff9ec93f3af082ccede46aee-da5c57994b174d6796355e16ac02b6e2 2024-11-13T13:37:29,518 INFO [StoreOpener-ef17de14e77e88401d31bb1a26d8cd11-1 {}] regionserver.HStore(327): Store=ef17de14e77e88401d31bb1a26d8cd11/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T13:37:29,518 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:37:29,519 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:37:29,520 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:37:29,521 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:37:29,521 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:37:29,522 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:37:29,523 INFO [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened ef17de14e77e88401d31bb1a26d8cd11; next sequenceid=106; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=847352, jitterRate=0.07746435701847076}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T13:37:29,523 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:37:29,524 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for ef17de14e77e88401d31bb1a26d8cd11: Running coprocessor pre-open hook at 1731505049497Writing region info on filesystem at 1731505049497Initializing all the Stores at 1731505049498 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731505049498Cleaning up temporary data from old regions at 1731505049521 (+23 ms)Running coprocessor post-open hooks at 1731505049523 (+2 ms)Region opened successfully at 
1731505049524 (+1 ms) 2024-11-13T13:37:29,525 INFO [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11., pid=12, masterSystemTime=1731505049491 2024-11-13T13:37:29,525 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store ef17de14e77e88401d31bb1a26d8cd11:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T13:37:29,525 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T13:37:29,525 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-13T13:37:29,526 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11. 2024-11-13T13:37:29,526 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1541): ef17de14e77e88401d31bb1a26d8cd11/info is initiating minor compaction (all files) 2024-11-13T13:37:29,526 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ef17de14e77e88401d31bb1a26d8cd11/info in TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11. 2024-11-13T13:37:29,527 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/5ae30c907d014a109e71cc014ace382b.56afd5c2ff9ec93f3af082ccede46aee->hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/5ae30c907d014a109e71cc014ace382b-top, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/TestLogRolling-testLogRolling=56afd5c2ff9ec93f3af082ccede46aee-da5c57994b174d6796355e16ac02b6e2] into tmpdir=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp, totalSize=95.3 K 2024-11-13T13:37:29,528 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5ae30c907d014a109e71cc014ace382b.56afd5c2ff9ec93f3af082ccede46aee, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1731505044189 2024-11-13T13:37:29,528 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11. 
2024-11-13T13:37:29,528 INFO [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11. 2024-11-13T13:37:29,528 INFO [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731505048821.0f561145611ba79a8558052544d3d0a2. 2024-11-13T13:37:29,528 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 0f561145611ba79a8558052544d3d0a2, NAME => 'TestLogRolling-testLogRolling,,1731505048821.0f561145611ba79a8558052544d3d0a2.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-13T13:37:29,528 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=56afd5c2ff9ec93f3af082ccede46aee-da5c57994b174d6796355e16ac02b6e2, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1731505048361 2024-11-13T13:37:29,528 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 0f561145611ba79a8558052544d3d0a2 2024-11-13T13:37:29,528 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731505048821.0f561145611ba79a8558052544d3d0a2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:37:29,529 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 0f561145611ba79a8558052544d3d0a2 2024-11-13T13:37:29,529 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 0f561145611ba79a8558052544d3d0a2 2024-11-13T13:37:29,529 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=ef17de14e77e88401d31bb1a26d8cd11, regionState=OPEN, openSeqNum=106, regionLocation=bfeb2336aed7,38995,1731505032992 2024-11-13T13:37:29,530 INFO [StoreOpener-0f561145611ba79a8558052544d3d0a2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 0f561145611ba79a8558052544d3d0a2 2024-11-13T13:37:29,531 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38995 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-13T13:37:29,531 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure ef17de14e77e88401d31bb1a26d8cd11, server=bfeb2336aed7,38995,1731505032992 because future has completed 2024-11-13T13:37:29,531 INFO [StoreOpener-0f561145611ba79a8558052544d3d0a2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0f561145611ba79a8558052544d3d0a2 columnFamilyName info 2024-11-13T13:37:29,531 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 2024-11-13T13:37:29,531 DEBUG [StoreOpener-0f561145611ba79a8558052544d3d0a2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:37:29,531 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-11-13T13:37:29,535 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-13T13:37:29,536 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure ef17de14e77e88401d31bb1a26d8cd11, server=bfeb2336aed7,38995,1731505032992 in 196 msec 2024-11-13T13:37:29,537 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ef17de14e77e88401d31bb1a26d8cd11, ASSIGN in 356 msec 2024-11-13T13:37:29,553 DEBUG [StoreOpener-0f561145611ba79a8558052544d3d0a2-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/0f561145611ba79a8558052544d3d0a2/info/5ae30c907d014a109e71cc014ace382b.56afd5c2ff9ec93f3af082ccede46aee->hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/5ae30c907d014a109e71cc014ace382b-bottom 2024-11-13T13:37:29,554 INFO [StoreOpener-0f561145611ba79a8558052544d3d0a2-1 {}] regionserver.HStore(327): Store=0f561145611ba79a8558052544d3d0a2/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T13:37:29,554 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 0f561145611ba79a8558052544d3d0a2 2024-11-13T13:37:29,555 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/0f561145611ba79a8558052544d3d0a2 2024-11-13T13:37:29,555 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/hbase/meta/1588230740/.tmp/info/d81e6e42a0c04587890d8f08dcf4e403 is 193, key is TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11./info:regioninfo/1731505049528/Put/seqid=0 2024-11-13T13:37:29,555 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): ef17de14e77e88401d31bb1a26d8cd11#info#compaction#64 average throughput is 20.52 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T13:37:29,555 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/2c805ed796c8497e87b801daea923b63 is 1080, key is row0062/info:/1731505046351/Put/seqid=0 2024-11-13T13:37:29,556 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/0f561145611ba79a8558052544d3d0a2 2024-11-13T13:37:29,557 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 0f561145611ba79a8558052544d3d0a2 2024-11-13T13:37:29,557 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 0f561145611ba79a8558052544d3d0a2 2024-11-13T13:37:29,559 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 0f561145611ba79a8558052544d3d0a2 2024-11-13T13:37:29,560 INFO [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 0f561145611ba79a8558052544d3d0a2; next sequenceid=106; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=802034, jitterRate=0.01983959972858429}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T13:37:29,560 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0f561145611ba79a8558052544d3d0a2 2024-11-13T13:37:29,560 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 0f561145611ba79a8558052544d3d0a2: Running coprocessor pre-open hook at 1731505049529Writing region info on filesystem at 1731505049529Initializing all the Stores at 1731505049530 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731505049530Cleaning up temporary data from old regions at 1731505049557 (+27 ms)Running coprocessor post-open hooks at 1731505049560 (+3 ms)Region opened successfully at 1731505049560 2024-11-13T13:37:29,561 INFO [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731505048821.0f561145611ba79a8558052544d3d0a2., pid=13, masterSystemTime=1731505049491 2024-11-13T13:37:29,561 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] 
regionserver.CompactSplit(403): Add compact mark for store 0f561145611ba79a8558052544d3d0a2:info, priority=-2147483648, current under compaction store size is 2 2024-11-13T13:37:29,561 DEBUG [RS:0;bfeb2336aed7:38995-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-13T13:37:29,561 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T13:37:29,562 INFO [RS:0;bfeb2336aed7:38995-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731505048821.0f561145611ba79a8558052544d3d0a2. 2024-11-13T13:37:29,562 DEBUG [RS:0;bfeb2336aed7:38995-longCompactions-0 {}] regionserver.HStore(1541): 0f561145611ba79a8558052544d3d0a2/info is initiating minor compaction (all files) 2024-11-13T13:37:29,562 INFO [RS:0;bfeb2336aed7:38995-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0f561145611ba79a8558052544d3d0a2/info in TestLogRolling-testLogRolling,,1731505048821.0f561145611ba79a8558052544d3d0a2. 2024-11-13T13:37:29,562 INFO [RS:0;bfeb2336aed7:38995-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/0f561145611ba79a8558052544d3d0a2/info/5ae30c907d014a109e71cc014ace382b.56afd5c2ff9ec93f3af082ccede46aee->hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/5ae30c907d014a109e71cc014ace382b-bottom] into tmpdir=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/0f561145611ba79a8558052544d3d0a2/.tmp, totalSize=73.6 K 2024-11-13T13:37:29,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741849_1025 (size=9882) 2024-11-13T13:37:29,563 DEBUG [RS:0;bfeb2336aed7:38995-longCompactions-0 {}] compactions.Compactor(225): Compacting 5ae30c907d014a109e71cc014ace382b.56afd5c2ff9ec93f3af082ccede46aee, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731505044189 2024-11-13T13:37:29,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741849_1025 (size=9882) 2024-11-13T13:37:29,564 DEBUG [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731505048821.0f561145611ba79a8558052544d3d0a2. 2024-11-13T13:37:29,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741850_1026 (size=26696) 2024-11-13T13:37:29,564 INFO [RS_OPEN_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731505048821.0f561145611ba79a8558052544d3d0a2. 
2024-11-13T13:37:29,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741850_1026 (size=26696) 2024-11-13T13:37:29,565 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/hbase/meta/1588230740/.tmp/info/d81e6e42a0c04587890d8f08dcf4e403 2024-11-13T13:37:29,565 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=0f561145611ba79a8558052544d3d0a2, regionState=OPEN, openSeqNum=106, regionLocation=bfeb2336aed7,38995,1731505032992 2024-11-13T13:37:29,568 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0f561145611ba79a8558052544d3d0a2, server=bfeb2336aed7,38995,1731505032992 because future has completed 2024-11-13T13:37:29,575 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/2c805ed796c8497e87b801daea923b63 as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/2c805ed796c8497e87b801daea923b63 2024-11-13T13:37:29,576 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-11-13T13:37:29,576 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 0f561145611ba79a8558052544d3d0a2, server=bfeb2336aed7,38995,1731505032992 in 234 msec 2024-11-13T13:37:29,580 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7 2024-11-13T13:37:29,580 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0f561145611ba79a8558052544d3d0a2, ASSIGN in 396 msec 2024-11-13T13:37:29,583 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=56afd5c2ff9ec93f3af082ccede46aee, daughterA=0f561145611ba79a8558052544d3d0a2, daughterB=ef17de14e77e88401d31bb1a26d8cd11 in 759 msec 2024-11-13T13:37:29,584 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 2 (all) file(s) in ef17de14e77e88401d31bb1a26d8cd11/info of ef17de14e77e88401d31bb1a26d8cd11 into 2c805ed796c8497e87b801daea923b63(size=26.1 K), total size for store is 26.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-13T13:37:29,584 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ef17de14e77e88401d31bb1a26d8cd11: 2024-11-13T13:37:29,584 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11., storeName=ef17de14e77e88401d31bb1a26d8cd11/info, priority=14, startTime=1731505049525; duration=0sec 2024-11-13T13:37:29,584 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T13:37:29,584 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ef17de14e77e88401d31bb1a26d8cd11:info 2024-11-13T13:37:29,586 INFO [RS:0;bfeb2336aed7:38995-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0f561145611ba79a8558052544d3d0a2#info#compaction#65 average throughput is 31.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T13:37:29,587 DEBUG [RS:0;bfeb2336aed7:38995-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/0f561145611ba79a8558052544d3d0a2/.tmp/info/c68f24750bba4a779e72674a809dc322 is 1080, key is row0001/info:/1731505044189/Put/seqid=0 2024-11-13T13:37:29,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741851_1027 (size=70862) 2024-11-13T13:37:29,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741851_1027 (size=70862) 2024-11-13T13:37:29,597 DEBUG [RS:0;bfeb2336aed7:38995-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/0f561145611ba79a8558052544d3d0a2/.tmp/info/c68f24750bba4a779e72674a809dc322 as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/0f561145611ba79a8558052544d3d0a2/info/c68f24750bba4a779e72674a809dc322 2024-11-13T13:37:29,599 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/hbase/meta/1588230740/.tmp/ns/18238d93438548a6abb05fd2399024fd is 43, key is default/ns:d/1731505034092/Put/seqid=0 2024-11-13T13:37:29,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741852_1028 (size=5153) 2024-11-13T13:37:29,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741852_1028 (size=5153) 2024-11-13T13:37:29,604 INFO [RS:0;bfeb2336aed7:38995-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 0f561145611ba79a8558052544d3d0a2/info of 0f561145611ba79a8558052544d3d0a2 into c68f24750bba4a779e72674a809dc322(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-13T13:37:29,604 DEBUG [RS:0;bfeb2336aed7:38995-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0f561145611ba79a8558052544d3d0a2: 2024-11-13T13:37:29,604 INFO [RS:0;bfeb2336aed7:38995-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731505048821.0f561145611ba79a8558052544d3d0a2., storeName=0f561145611ba79a8558052544d3d0a2/info, priority=15, startTime=1731505049561; duration=0sec 2024-11-13T13:37:29,605 DEBUG [RS:0;bfeb2336aed7:38995-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T13:37:29,605 DEBUG [RS:0;bfeb2336aed7:38995-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0f561145611ba79a8558052544d3d0a2:info 2024-11-13T13:37:29,605 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/hbase/meta/1588230740/.tmp/ns/18238d93438548a6abb05fd2399024fd 2024-11-13T13:37:29,624 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/hbase/meta/1588230740/.tmp/table/4d9e3ca008e848a6a9f30625eb64c94f is 65, key is TestLogRolling-testLogRolling/table:state/1731505034502/Put/seqid=0 2024-11-13T13:37:29,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741853_1029 (size=5340) 2024-11-13T13:37:29,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741853_1029 (size=5340) 2024-11-13T13:37:29,629 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/hbase/meta/1588230740/.tmp/table/4d9e3ca008e848a6a9f30625eb64c94f 2024-11-13T13:37:29,635 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/hbase/meta/1588230740/.tmp/info/d81e6e42a0c04587890d8f08dcf4e403 as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/hbase/meta/1588230740/info/d81e6e42a0c04587890d8f08dcf4e403 2024-11-13T13:37:29,640 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/hbase/meta/1588230740/info/d81e6e42a0c04587890d8f08dcf4e403, entries=30, sequenceid=17, filesize=9.7 K 2024-11-13T13:37:29,642 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/hbase/meta/1588230740/.tmp/ns/18238d93438548a6abb05fd2399024fd as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/hbase/meta/1588230740/ns/18238d93438548a6abb05fd2399024fd 2024-11-13T13:37:29,647 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/hbase/meta/1588230740/ns/18238d93438548a6abb05fd2399024fd, entries=2, sequenceid=17, filesize=5.0 K 2024-11-13T13:37:29,649 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/hbase/meta/1588230740/.tmp/table/4d9e3ca008e848a6a9f30625eb64c94f as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/hbase/meta/1588230740/table/4d9e3ca008e848a6a9f30625eb64c94f 2024-11-13T13:37:29,654 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/hbase/meta/1588230740/table/4d9e3ca008e848a6a9f30625eb64c94f, entries=2, sequenceid=17, filesize=5.2 K 2024-11-13T13:37:29,655 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 124ms, sequenceid=17, compaction requested=false 2024-11-13T13:37:29,655 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-13T13:37:30,156 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:37:30,156 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:31,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:31,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:37:32,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:32,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:33,159 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:37:33,159 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:34,041 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,041 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,041 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,042 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,042 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,042 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,042 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,043 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,067 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,067 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,067 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,068 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,068 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,068 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,071 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,071 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,071 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,074 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,159 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:34,159 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:34,580 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T13:37:34,581 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,581 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,612 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,612 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,612 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,612 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,612 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,613 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,617 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,617 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,617 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:34,620 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:37:35,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:37:35,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:36,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:36,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:37:37,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:37,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:38,162 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-13T13:37:38,162 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-13T13:37:38,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38995 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:34298 deadline: 1731505068452, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee. is not online on bfeb2336aed7,38995,1731505032992
2024-11-13T13:37:38,453 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee., hostname=bfeb2336aed7,38995,1731505032992, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee., hostname=bfeb2336aed7,38995,1731505032992, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee. is not online on bfeb2336aed7,38995,1731505032992
	at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186)
	at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164)
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413)
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943)
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-13T13:37:38,453 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee., hostname=bfeb2336aed7,38995,1731505032992, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee. is not online on bfeb2336aed7,38995,1731505032992
	at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186)
	at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164)
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413)
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943)
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-13T13:37:38,453 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731505034127.56afd5c2ff9ec93f3af082ccede46aee., hostname=bfeb2336aed7,38995,1731505032992, seqNum=2 from cache
2024-11-13T13:37:39,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:39,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:37:40,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:40,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-13T13:37:42,747 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-13T13:37:54,111 INFO [master/bfeb2336aed7:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-13T13:37:54,111 INFO [master/bfeb2336aed7:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-13T13:37:54,176 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:54,176 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:55,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:55,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:56,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:56,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:57,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:57,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:58,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:58,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:58,481 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0082', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11., hostname=bfeb2336aed7,38995,1731505032992, seqNum=106] 2024-11-13T13:37:58,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38995 {}] regionserver.HRegion(8855): Flush requested on ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:37:58,493 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ef17de14e77e88401d31bb1a26d8cd11 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T13:37:58,499 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/8af1dff1c79a42abac30c8d03f809e6a is 1080, key is row0082/info:/1731505078482/Put/seqid=0 2024-11-13T13:37:58,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741854_1030 (size=12509) 2024-11-13T13:37:58,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741854_1030 (size=12509) 2024-11-13T13:37:58,505 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/8af1dff1c79a42abac30c8d03f809e6a 2024-11-13T13:37:58,512 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/8af1dff1c79a42abac30c8d03f809e6a as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/8af1dff1c79a42abac30c8d03f809e6a 2024-11-13T13:37:58,517 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/8af1dff1c79a42abac30c8d03f809e6a, entries=7, sequenceid=116, filesize=12.2 K 2024-11-13T13:37:58,518 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=8.41 KB/8608 for ef17de14e77e88401d31bb1a26d8cd11 in 25ms, sequenceid=116, compaction requested=false 2024-11-13T13:37:58,518 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ef17de14e77e88401d31bb1a26d8cd11: 2024-11-13T13:37:59,022 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375 2024-11-13T13:37:59,180 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:37:59,180 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:00,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:00,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:00,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38995 {}] regionserver.HRegion(8855): Flush requested on ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:38:00,514 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ef17de14e77e88401d31bb1a26d8cd11 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-13T13:38:00,520 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/f095059102a34efab22e66fa44a331c6 is 1080, key is row0089/info:/1731505078494/Put/seqid=0 2024-11-13T13:38:00,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741855_1031 (size=14663) 2024-11-13T13:38:00,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741855_1031 (size=14663) 2024-11-13T13:38:00,527 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/f095059102a34efab22e66fa44a331c6 2024-11-13T13:38:00,533 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/f095059102a34efab22e66fa44a331c6 as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/f095059102a34efab22e66fa44a331c6 2024-11-13T13:38:00,539 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/f095059102a34efab22e66fa44a331c6, entries=9, sequenceid=128, filesize=14.3 K 2024-11-13T13:38:00,540 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=11.56 KB/11836 for ef17de14e77e88401d31bb1a26d8cd11 in 26ms, sequenceid=128, compaction requested=true 2024-11-13T13:38:00,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ef17de14e77e88401d31bb1a26d8cd11: 2024-11-13T13:38:00,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ef17de14e77e88401d31bb1a26d8cd11:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T13:38:00,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T13:38:00,541 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T13:38:00,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38995 {}] regionserver.HRegion(8855): Flush requested on ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:38:00,542 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ef17de14e77e88401d31bb1a26d8cd11 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-13T13:38:00,542 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 53868 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T13:38:00,542 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1541): ef17de14e77e88401d31bb1a26d8cd11/info is initiating minor compaction (all files) 2024-11-13T13:38:00,542 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ef17de14e77e88401d31bb1a26d8cd11/info in TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11. 2024-11-13T13:38:00,542 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/2c805ed796c8497e87b801daea923b63, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/8af1dff1c79a42abac30c8d03f809e6a, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/f095059102a34efab22e66fa44a331c6] into tmpdir=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp, totalSize=52.6 K 2024-11-13T13:38:00,543 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2c805ed796c8497e87b801daea923b63, keycount=20, bloomtype=ROW, size=26.1 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1731505046351 2024-11-13T13:38:00,543 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8af1dff1c79a42abac30c8d03f809e6a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1731505078482 2024-11-13T13:38:00,543 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.Compactor(225): Compacting f095059102a34efab22e66fa44a331c6, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1731505078494 2024-11-13T13:38:00,546 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/54a53c3607bb45af9c52902fe868b73d is 1080, key is row0098/info:/1731505080516/Put/seqid=0 2024-11-13T13:38:00,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added 
to blk_1073741856_1032 (size=17906) 2024-11-13T13:38:00,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741856_1032 (size=17906) 2024-11-13T13:38:00,560 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=143 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/54a53c3607bb45af9c52902fe868b73d 2024-11-13T13:38:00,563 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ef17de14e77e88401d31bb1a26d8cd11#info#compaction#71 average throughput is 36.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T13:38:00,564 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/a34603311abb4c34ac34edc6c8aef0cc is 1080, key is row0062/info:/1731505046351/Put/seqid=0 2024-11-13T13:38:00,571 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/54a53c3607bb45af9c52902fe868b73d as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/54a53c3607bb45af9c52902fe868b73d 2024-11-13T13:38:00,578 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/54a53c3607bb45af9c52902fe868b73d, entries=12, sequenceid=143, filesize=17.5 K 2024-11-13T13:38:00,579 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=15.76 KB/16140 for ef17de14e77e88401d31bb1a26d8cd11 in 38ms, sequenceid=143, compaction requested=false 2024-11-13T13:38:00,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ef17de14e77e88401d31bb1a26d8cd11: 2024-11-13T13:38:00,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38995 {}] regionserver.HRegion(8855): Flush requested on ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:38:00,581 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ef17de14e77e88401d31bb1a26d8cd11 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-13T13:38:00,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741857_1033 (size=44066) 2024-11-13T13:38:00,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741857_1033 (size=44066) 2024-11-13T13:38:00,587 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/c611315fa13c49acbf5562de6f2c0687 is 1080, key is row0110/info:/1731505080543/Put/seqid=0 2024-11-13T13:38:00,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741858_1034 (size=22238) 2024-11-13T13:38:00,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741858_1034 (size=22238) 2024-11-13T13:38:00,594 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=162 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/c611315fa13c49acbf5562de6f2c0687 2024-11-13T13:38:00,595 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/a34603311abb4c34ac34edc6c8aef0cc as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/a34603311abb4c34ac34edc6c8aef0cc 2024-11-13T13:38:00,600 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/c611315fa13c49acbf5562de6f2c0687 as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/c611315fa13c49acbf5562de6f2c0687 2024-11-13T13:38:00,602 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ef17de14e77e88401d31bb1a26d8cd11/info of ef17de14e77e88401d31bb1a26d8cd11 into a34603311abb4c34ac34edc6c8aef0cc(size=43.0 K), total size for store is 60.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
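[Editorial sketch, not part of the captured log] The "Committing .tmp/info/... as .../info/..." and "Completed compaction ... into ..." entries above follow the usual HDFS write-to-temp-then-rename pattern. The following minimal Java sketch illustrates that pattern only; the class name, paths, and byte-array payload are hypothetical and this is not HBase's actual store-file or compaction code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.IOException;

public class TmpThenRenameSketch {
    // Write data to <dir>/.tmp/<name>, then rename it into <dir>/<name>.
    // Real store files are produced by HFile writers; a byte array stands in here.
    static void commitFile(FileSystem fs, Path dir, String name, byte[] data) throws IOException {
        Path tmp = new Path(new Path(dir, ".tmp"), name);
        Path dst = new Path(dir, name);
        try (FSDataOutputStream out = fs.create(tmp, true)) {
            out.write(data); // flush/compaction output would be streamed here
        }
        // Atomic-within-a-directory rename makes the new file visible only once complete.
        if (!fs.rename(tmp, dst)) {
            throw new IOException("Failed to commit " + tmp + " as " + dst);
        }
    }

    public static void main(String[] args) throws IOException {
        // Assumes a filesystem (HDFS or local) reachable via the default configuration.
        FileSystem fs = FileSystem.get(new Configuration());
        commitFile(fs, new Path("/tmp/sketch"), "example-storefile", "demo".getBytes());
    }
}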
2024-11-13T13:38:00,602 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ef17de14e77e88401d31bb1a26d8cd11: 2024-11-13T13:38:00,602 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11., storeName=ef17de14e77e88401d31bb1a26d8cd11/info, priority=13, startTime=1731505080541; duration=0sec 2024-11-13T13:38:00,602 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T13:38:00,602 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ef17de14e77e88401d31bb1a26d8cd11:info 2024-11-13T13:38:00,606 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/c611315fa13c49acbf5562de6f2c0687, entries=16, sequenceid=162, filesize=21.7 K 2024-11-13T13:38:00,607 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=3.15 KB/3228 for ef17de14e77e88401d31bb1a26d8cd11 in 26ms, sequenceid=162, compaction requested=true 2024-11-13T13:38:00,607 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ef17de14e77e88401d31bb1a26d8cd11: 2024-11-13T13:38:00,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ef17de14e77e88401d31bb1a26d8cd11:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T13:38:00,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T13:38:00,607 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T13:38:00,609 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 84210 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T13:38:00,609 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1541): ef17de14e77e88401d31bb1a26d8cd11/info is initiating minor compaction (all files) 2024-11-13T13:38:00,609 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ef17de14e77e88401d31bb1a26d8cd11/info in TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11. 
2024-11-13T13:38:00,609 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/a34603311abb4c34ac34edc6c8aef0cc, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/54a53c3607bb45af9c52902fe868b73d, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/c611315fa13c49acbf5562de6f2c0687] into tmpdir=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp, totalSize=82.2 K 2024-11-13T13:38:00,609 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.Compactor(225): Compacting a34603311abb4c34ac34edc6c8aef0cc, keycount=36, bloomtype=ROW, size=43.0 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1731505046351 2024-11-13T13:38:00,610 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.Compactor(225): Compacting 54a53c3607bb45af9c52902fe868b73d, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=143, earliestPutTs=1731505080516 2024-11-13T13:38:00,610 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.Compactor(225): Compacting c611315fa13c49acbf5562de6f2c0687, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1731505080543 2024-11-13T13:38:00,621 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ef17de14e77e88401d31bb1a26d8cd11#info#compaction#73 average throughput is 32.84 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T13:38:00,622 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/eb9fd19fb35c4a8ea6dcc9aa4216c544 is 1080, key is row0062/info:/1731505046351/Put/seqid=0 2024-11-13T13:38:00,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741859_1035 (size=74493) 2024-11-13T13:38:00,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741859_1035 (size=74493) 2024-11-13T13:38:00,637 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/eb9fd19fb35c4a8ea6dcc9aa4216c544 as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/eb9fd19fb35c4a8ea6dcc9aa4216c544 2024-11-13T13:38:00,644 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ef17de14e77e88401d31bb1a26d8cd11/info of ef17de14e77e88401d31bb1a26d8cd11 into eb9fd19fb35c4a8ea6dcc9aa4216c544(size=72.7 K), total size for store is 72.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T13:38:00,644 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ef17de14e77e88401d31bb1a26d8cd11: 2024-11-13T13:38:00,644 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11., storeName=ef17de14e77e88401d31bb1a26d8cd11/info, priority=13, startTime=1731505080607; duration=0sec 2024-11-13T13:38:00,644 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T13:38:00,644 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ef17de14e77e88401d31bb1a26d8cd11:info 2024-11-13T13:38:01,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:01,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:02,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:02,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:02,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38995 {}] regionserver.HRegion(8855): Flush requested on ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:38:02,600 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ef17de14e77e88401d31bb1a26d8cd11 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T13:38:02,617 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/b5d211440b15465991fca94e5f06dd55 is 1080, key is row0126/info:/1731505080582/Put/seqid=0 2024-11-13T13:38:02,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741860_1036 (size=12516) 2024-11-13T13:38:02,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741860_1036 (size=12516) 2024-11-13T13:38:02,628 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/b5d211440b15465991fca94e5f06dd55 2024-11-13T13:38:02,634 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/b5d211440b15465991fca94e5f06dd55 as 
hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/b5d211440b15465991fca94e5f06dd55 2024-11-13T13:38:02,639 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/b5d211440b15465991fca94e5f06dd55, entries=7, sequenceid=174, filesize=12.2 K 2024-11-13T13:38:02,640 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=19.96 KB/20444 for ef17de14e77e88401d31bb1a26d8cd11 in 40ms, sequenceid=174, compaction requested=false 2024-11-13T13:38:02,640 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ef17de14e77e88401d31bb1a26d8cd11: 2024-11-13T13:38:02,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38995 {}] regionserver.HRegion(8855): Flush requested on ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:38:02,641 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ef17de14e77e88401d31bb1a26d8cd11 1/1 column families, dataSize=21.02 KB heapSize=22.75 KB 2024-11-13T13:38:02,645 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/d8d5a23485e64a53aff8a97c34224469 is 1080, key is row0133/info:/1731505082601/Put/seqid=0 2024-11-13T13:38:02,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741861_1037 (size=26550) 2024-11-13T13:38:02,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741861_1037 (size=26550) 2024-11-13T13:38:02,651 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=21.02 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/d8d5a23485e64a53aff8a97c34224469 2024-11-13T13:38:02,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/d8d5a23485e64a53aff8a97c34224469 as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/d8d5a23485e64a53aff8a97c34224469 2024-11-13T13:38:02,662 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/d8d5a23485e64a53aff8a97c34224469, entries=20, sequenceid=197, filesize=25.9 K 2024-11-13T13:38:02,663 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~21.02 KB/21520, heapSize ~22.73 KB/23280, currentSize=8.41 KB/8608 for ef17de14e77e88401d31bb1a26d8cd11 in 22ms, sequenceid=197, compaction requested=true 2024-11-13T13:38:02,663 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for ef17de14e77e88401d31bb1a26d8cd11: 2024-11-13T13:38:02,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ef17de14e77e88401d31bb1a26d8cd11:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T13:38:02,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T13:38:02,664 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T13:38:02,665 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 113559 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T13:38:02,665 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1541): ef17de14e77e88401d31bb1a26d8cd11/info is initiating minor compaction (all files) 2024-11-13T13:38:02,665 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ef17de14e77e88401d31bb1a26d8cd11/info in TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11. 2024-11-13T13:38:02,665 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/eb9fd19fb35c4a8ea6dcc9aa4216c544, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/b5d211440b15465991fca94e5f06dd55, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/d8d5a23485e64a53aff8a97c34224469] into tmpdir=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp, totalSize=110.9 K 2024-11-13T13:38:02,665 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.Compactor(225): Compacting eb9fd19fb35c4a8ea6dcc9aa4216c544, keycount=64, bloomtype=ROW, size=72.7 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1731505046351 2024-11-13T13:38:02,666 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.Compactor(225): Compacting b5d211440b15465991fca94e5f06dd55, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1731505080582 2024-11-13T13:38:02,666 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.Compactor(225): Compacting d8d5a23485e64a53aff8a97c34224469, keycount=20, bloomtype=ROW, size=25.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1731505082601 2024-11-13T13:38:02,678 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ef17de14e77e88401d31bb1a26d8cd11#info#compaction#76 average throughput is 46.69 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T13:38:02,678 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/86f66082177e44c6a006afd4f69f0cd3 is 1080, key is row0062/info:/1731505046351/Put/seqid=0 2024-11-13T13:38:02,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741862_1038 (size=103705) 2024-11-13T13:38:02,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741862_1038 (size=103705) 2024-11-13T13:38:02,687 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/86f66082177e44c6a006afd4f69f0cd3 as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/86f66082177e44c6a006afd4f69f0cd3 2024-11-13T13:38:02,693 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ef17de14e77e88401d31bb1a26d8cd11/info of ef17de14e77e88401d31bb1a26d8cd11 into 86f66082177e44c6a006afd4f69f0cd3(size=101.3 K), total size for store is 101.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T13:38:02,693 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ef17de14e77e88401d31bb1a26d8cd11: 2024-11-13T13:38:02,693 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11., storeName=ef17de14e77e88401d31bb1a26d8cd11/info, priority=13, startTime=1731505082663; duration=0sec 2024-11-13T13:38:02,693 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T13:38:02,693 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ef17de14e77e88401d31bb1a26d8cd11:info 2024-11-13T13:38:03,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:03,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:04,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:04,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:04,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38995 {}] regionserver.HRegion(8855): Flush requested on ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:38:04,663 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ef17de14e77e88401d31bb1a26d8cd11 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-13T13:38:04,669 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/dec47b598cb949e5a1a46ba435b57e2c is 1080, key is row0153/info:/1731505082642/Put/seqid=0 2024-11-13T13:38:04,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741863_1039 (size=14672) 2024-11-13T13:38:04,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741863_1039 (size=14672) 2024-11-13T13:38:04,676 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/dec47b598cb949e5a1a46ba435b57e2c 2024-11-13T13:38:04,683 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/dec47b598cb949e5a1a46ba435b57e2c as 
hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/dec47b598cb949e5a1a46ba435b57e2c 2024-11-13T13:38:04,689 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/dec47b598cb949e5a1a46ba435b57e2c, entries=9, sequenceid=210, filesize=14.3 K 2024-11-13T13:38:04,690 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=11.56 KB/11836 for ef17de14e77e88401d31bb1a26d8cd11 in 26ms, sequenceid=210, compaction requested=false 2024-11-13T13:38:04,690 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ef17de14e77e88401d31bb1a26d8cd11: 2024-11-13T13:38:04,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38995 {}] regionserver.HRegion(8855): Flush requested on ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:38:04,690 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ef17de14e77e88401d31bb1a26d8cd11 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-13T13:38:04,694 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/533adfa15338402d871f217c4d5114d2 is 1080, key is row0162/info:/1731505084665/Put/seqid=0 2024-11-13T13:38:04,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741864_1040 (size=17906) 2024-11-13T13:38:04,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741864_1040 (size=17906) 2024-11-13T13:38:04,701 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=225 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/533adfa15338402d871f217c4d5114d2 2024-11-13T13:38:04,708 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/533adfa15338402d871f217c4d5114d2 as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/533adfa15338402d871f217c4d5114d2 2024-11-13T13:38:04,714 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/533adfa15338402d871f217c4d5114d2, entries=12, sequenceid=225, filesize=17.5 K 2024-11-13T13:38:04,715 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=11.56 KB/11836 for ef17de14e77e88401d31bb1a26d8cd11 in 25ms, sequenceid=225, compaction requested=true 2024-11-13T13:38:04,715 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for ef17de14e77e88401d31bb1a26d8cd11: 2024-11-13T13:38:04,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ef17de14e77e88401d31bb1a26d8cd11:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T13:38:04,715 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T13:38:04,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T13:38:04,716 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 136283 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T13:38:04,716 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1541): ef17de14e77e88401d31bb1a26d8cd11/info is initiating minor compaction (all files) 2024-11-13T13:38:04,716 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ef17de14e77e88401d31bb1a26d8cd11/info in TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11. 2024-11-13T13:38:04,717 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/86f66082177e44c6a006afd4f69f0cd3, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/dec47b598cb949e5a1a46ba435b57e2c, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/533adfa15338402d871f217c4d5114d2] into tmpdir=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp, totalSize=133.1 K 2024-11-13T13:38:04,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38995 {}] regionserver.HRegion(8855): Flush requested on ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:38:04,717 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ef17de14e77e88401d31bb1a26d8cd11 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-13T13:38:04,717 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.Compactor(225): Compacting 86f66082177e44c6a006afd4f69f0cd3, keycount=91, bloomtype=ROW, size=101.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1731505046351 2024-11-13T13:38:04,718 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.Compactor(225): Compacting dec47b598cb949e5a1a46ba435b57e2c, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1731505082642 2024-11-13T13:38:04,718 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.Compactor(225): Compacting 533adfa15338402d871f217c4d5114d2, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=225, earliestPutTs=1731505084665 2024-11-13T13:38:04,721 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/a63bbd9561c8456b8f028e5f1601d6c5 is 1080, key is row0174/info:/1731505084691/Put/seqid=0 2024-11-13T13:38:04,740 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ef17de14e77e88401d31bb1a26d8cd11#info#compaction#80 average throughput is 57.46 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T13:38:04,740 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/07f0c591b3b6420583b4429fa3c33ec0 is 1080, key is row0062/info:/1731505046351/Put/seqid=0 2024-11-13T13:38:04,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741865_1041 (size=19000) 2024-11-13T13:38:04,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741865_1041 (size=19000) 2024-11-13T13:38:04,747 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=241 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/a63bbd9561c8456b8f028e5f1601d6c5 2024-11-13T13:38:04,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/a63bbd9561c8456b8f028e5f1601d6c5 as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/a63bbd9561c8456b8f028e5f1601d6c5 2024-11-13T13:38:04,757 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/a63bbd9561c8456b8f028e5f1601d6c5, entries=13, sequenceid=241, filesize=18.6 K 2024-11-13T13:38:04,758 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=6.30 KB/6456 for ef17de14e77e88401d31bb1a26d8cd11 in 41ms, sequenceid=241, compaction requested=false 2024-11-13T13:38:04,759 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ef17de14e77e88401d31bb1a26d8cd11: 2024-11-13T13:38:04,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741866_1042 (size=126581) 2024-11-13T13:38:04,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741866_1042 (size=126581) 2024-11-13T13:38:04,768 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/07f0c591b3b6420583b4429fa3c33ec0 as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/07f0c591b3b6420583b4429fa3c33ec0 2024-11-13T13:38:04,773 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ef17de14e77e88401d31bb1a26d8cd11/info of ef17de14e77e88401d31bb1a26d8cd11 into 07f0c591b3b6420583b4429fa3c33ec0(size=123.6 K), total size for store is 142.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T13:38:04,773 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ef17de14e77e88401d31bb1a26d8cd11: 2024-11-13T13:38:04,773 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11., storeName=ef17de14e77e88401d31bb1a26d8cd11/info, priority=13, startTime=1731505084715; duration=0sec 2024-11-13T13:38:04,773 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T13:38:04,773 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ef17de14e77e88401d31bb1a26d8cd11:info 2024-11-13T13:38:05,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:05,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:06,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:06,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:06,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38995 {}] regionserver.HRegion(8855): Flush requested on ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:38:06,736 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ef17de14e77e88401d31bb1a26d8cd11 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T13:38:06,743 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/a1907788811a426ab867c8438b0513f3 is 1080, key is row0187/info:/1731505084718/Put/seqid=0 2024-11-13T13:38:06,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741867_1043 (size=12516) 2024-11-13T13:38:06,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741867_1043 (size=12516) 2024-11-13T13:38:06,751 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/a1907788811a426ab867c8438b0513f3 2024-11-13T13:38:06,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/a1907788811a426ab867c8438b0513f3 as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/a1907788811a426ab867c8438b0513f3 2024-11-13T13:38:06,763 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/a1907788811a426ab867c8438b0513f3, entries=7, sequenceid=252, filesize=12.2 K 2024-11-13T13:38:06,764 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for ef17de14e77e88401d31bb1a26d8cd11 in 28ms, sequenceid=252, compaction requested=true 2024-11-13T13:38:06,764 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ef17de14e77e88401d31bb1a26d8cd11: 2024-11-13T13:38:06,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ef17de14e77e88401d31bb1a26d8cd11:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T13:38:06,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T13:38:06,764 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T13:38:06,765 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 158097 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T13:38:06,766 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1541): ef17de14e77e88401d31bb1a26d8cd11/info is initiating minor compaction (all files) 2024-11-13T13:38:06,766 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ef17de14e77e88401d31bb1a26d8cd11/info in TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11. 2024-11-13T13:38:06,766 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/07f0c591b3b6420583b4429fa3c33ec0, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/a63bbd9561c8456b8f028e5f1601d6c5, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/a1907788811a426ab867c8438b0513f3] into tmpdir=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp, totalSize=154.4 K 2024-11-13T13:38:06,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38995 {}] regionserver.HRegion(8855): Flush requested on ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:38:06,766 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ef17de14e77e88401d31bb1a26d8cd11 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-13T13:38:06,766 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.Compactor(225): Compacting 07f0c591b3b6420583b4429fa3c33ec0, keycount=112, bloomtype=ROW, size=123.6 K, encoding=NONE, compression=NONE, seqNum=225, earliestPutTs=1731505046351 2024-11-13T13:38:06,766 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.Compactor(225): Compacting a63bbd9561c8456b8f028e5f1601d6c5, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1731505084691 2024-11-13T13:38:06,767 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.Compactor(225): Compacting a1907788811a426ab867c8438b0513f3, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1731505084718 2024-11-13T13:38:06,770 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/c88fe52f660449c692e7335646b7fdf1 is 1080, key is row0194/info:/1731505086738/Put/seqid=0 2024-11-13T13:38:06,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to 
blk_1073741868_1044 (size=19011) 2024-11-13T13:38:06,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741868_1044 (size=19011) 2024-11-13T13:38:06,779 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ef17de14e77e88401d31bb1a26d8cd11#info#compaction#83 average throughput is 67.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T13:38:06,780 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/584439100aa347d9977145b7016ab682 is 1080, key is row0062/info:/1731505046351/Put/seqid=0 2024-11-13T13:38:06,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741869_1045 (size=148316) 2024-11-13T13:38:06,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741869_1045 (size=148316) 2024-11-13T13:38:06,793 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/584439100aa347d9977145b7016ab682 as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/584439100aa347d9977145b7016ab682 2024-11-13T13:38:06,800 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ef17de14e77e88401d31bb1a26d8cd11/info of ef17de14e77e88401d31bb1a26d8cd11 into 584439100aa347d9977145b7016ab682(size=144.8 K), total size for store is 144.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T13:38:06,800 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ef17de14e77e88401d31bb1a26d8cd11: 2024-11-13T13:38:06,800 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11., storeName=ef17de14e77e88401d31bb1a26d8cd11/info, priority=13, startTime=1731505086764; duration=0sec 2024-11-13T13:38:06,800 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T13:38:06,800 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ef17de14e77e88401d31bb1a26d8cd11:info 2024-11-13T13:38:06,805 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38995 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=ef17de14e77e88401d31bb1a26d8cd11, server=bfeb2336aed7,38995,1731505032992 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-13T13:38:06,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38995 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:34298 deadline: 1731505096805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=ef17de14e77e88401d31bb1a26d8cd11, server=bfeb2336aed7,38995,1731505032992 2024-11-13T13:38:06,806 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11., hostname=bfeb2336aed7,38995,1731505032992, seqNum=106 , the old value is region=TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11., hostname=bfeb2336aed7,38995,1731505032992, seqNum=106, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=ef17de14e77e88401d31bb1a26d8cd11, server=bfeb2336aed7,38995,1731505032992 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T13:38:06,806 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11., hostname=bfeb2336aed7,38995,1731505032992, seqNum=106 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=ef17de14e77e88401d31bb1a26d8cd11, server=bfeb2336aed7,38995,1731505032992 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T13:38:06,806 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11., hostname=bfeb2336aed7,38995,1731505032992, seqNum=106 because the exception is null or not the one we care about 2024-11-13T13:38:07,176 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=268 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/c88fe52f660449c692e7335646b7fdf1 2024-11-13T13:38:07,181 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/c88fe52f660449c692e7335646b7fdf1 as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/c88fe52f660449c692e7335646b7fdf1 2024-11-13T13:38:07,187 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/c88fe52f660449c692e7335646b7fdf1, entries=13, sequenceid=268, filesize=18.6 K 2024-11-13T13:38:07,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:07,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:07,188 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=16.81 KB/17216 for ef17de14e77e88401d31bb1a26d8cd11 in 422ms, sequenceid=268, compaction requested=false 2024-11-13T13:38:07,188 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ef17de14e77e88401d31bb1a26d8cd11: 2024-11-13T13:38:08,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:38:08,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:09,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:09,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:38:10,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:10,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:11,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:38:11,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:12,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:12,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:38:12,549 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=3, created chunk count=9, reused chunk count=68, reuseRatio=88.31% 2024-11-13T13:38:12,549 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-13T13:38:12,747 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T13:38:13,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:13,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:14,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:14,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:38:14,497 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ef17de14e77e88401d31bb1a26d8cd11, had cached 0 bytes from a total of 167327 2024-11-13T13:38:14,529 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 0f561145611ba79a8558052544d3d0a2, had cached 0 bytes from a total of 70862 2024-11-13T13:38:15,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:15,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:16,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:16,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:38:16,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38995 {}] regionserver.HRegion(8855): Flush requested on ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:38:16,837 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ef17de14e77e88401d31bb1a26d8cd11 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-11-13T13:38:16,843 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/22cc96e3aa3b45c98c9242695dc9aed6 is 1080, key is row0207/info:/1731505086767/Put/seqid=0 2024-11-13T13:38:16,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741870_1046 (size=23333) 2024-11-13T13:38:16,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741870_1046 (size=23333) 2024-11-13T13:38:16,849 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/22cc96e3aa3b45c98c9242695dc9aed6 2024-11-13T13:38:16,857 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/22cc96e3aa3b45c98c9242695dc9aed6 as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/22cc96e3aa3b45c98c9242695dc9aed6 2024-11-13T13:38:16,861 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/22cc96e3aa3b45c98c9242695dc9aed6, entries=17, sequenceid=289, filesize=22.8 K 2024-11-13T13:38:16,862 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=1.05 KB/1076 for ef17de14e77e88401d31bb1a26d8cd11 in 26ms, sequenceid=289, compaction requested=true 2024-11-13T13:38:16,862 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ef17de14e77e88401d31bb1a26d8cd11: 2024-11-13T13:38:16,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ef17de14e77e88401d31bb1a26d8cd11:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T13:38:16,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T13:38:16,862 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T13:38:16,864 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 190660 starting at candidate #0 after 
considering 1 permutations with 1 in ratio 2024-11-13T13:38:16,864 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1541): ef17de14e77e88401d31bb1a26d8cd11/info is initiating minor compaction (all files) 2024-11-13T13:38:16,864 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ef17de14e77e88401d31bb1a26d8cd11/info in TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11. 2024-11-13T13:38:16,864 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/584439100aa347d9977145b7016ab682, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/c88fe52f660449c692e7335646b7fdf1, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/22cc96e3aa3b45c98c9242695dc9aed6] into tmpdir=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp, totalSize=186.2 K 2024-11-13T13:38:16,864 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.Compactor(225): Compacting 584439100aa347d9977145b7016ab682, keycount=132, bloomtype=ROW, size=144.8 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1731505046351 2024-11-13T13:38:16,864 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.Compactor(225): Compacting c88fe52f660449c692e7335646b7fdf1, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=268, earliestPutTs=1731505086738 2024-11-13T13:38:16,865 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.Compactor(225): Compacting 22cc96e3aa3b45c98c9242695dc9aed6, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1731505086767 2024-11-13T13:38:16,878 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ef17de14e77e88401d31bb1a26d8cd11#info#compaction#85 average throughput is 41.56 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T13:38:16,879 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/a135b6077f1f412ab9ee394adc3c5159 is 1080, key is row0062/info:/1731505046351/Put/seqid=0 2024-11-13T13:38:16,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741871_1047 (size=180794) 2024-11-13T13:38:16,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741871_1047 (size=180794) 2024-11-13T13:38:16,897 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/a135b6077f1f412ab9ee394adc3c5159 as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/a135b6077f1f412ab9ee394adc3c5159 2024-11-13T13:38:16,903 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ef17de14e77e88401d31bb1a26d8cd11/info of ef17de14e77e88401d31bb1a26d8cd11 into a135b6077f1f412ab9ee394adc3c5159(size=176.6 K), total size for store is 176.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T13:38:16,903 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ef17de14e77e88401d31bb1a26d8cd11: 2024-11-13T13:38:16,903 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11., storeName=ef17de14e77e88401d31bb1a26d8cd11/info, priority=13, startTime=1731505096862; duration=0sec 2024-11-13T13:38:16,903 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T13:38:16,903 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ef17de14e77e88401d31bb1a26d8cd11:info 2024-11-13T13:38:17,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:17,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:18,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:18,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:18,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38995 {}] regionserver.HRegion(8855): Flush requested on ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:38:18,862 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ef17de14e77e88401d31bb1a26d8cd11 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T13:38:18,868 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/878f1b6c384543f1955aec48d82bcd56 is 1080, key is row0224/info:/1731505096840/Put/seqid=0 2024-11-13T13:38:18,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741872_1048 (size=12523) 2024-11-13T13:38:18,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741872_1048 (size=12523) 2024-11-13T13:38:18,875 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=300 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/878f1b6c384543f1955aec48d82bcd56 2024-11-13T13:38:18,880 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/878f1b6c384543f1955aec48d82bcd56 as 
hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/878f1b6c384543f1955aec48d82bcd56 2024-11-13T13:38:18,886 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/878f1b6c384543f1955aec48d82bcd56, entries=7, sequenceid=300, filesize=12.2 K 2024-11-13T13:38:18,887 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for ef17de14e77e88401d31bb1a26d8cd11 in 24ms, sequenceid=300, compaction requested=false 2024-11-13T13:38:18,887 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ef17de14e77e88401d31bb1a26d8cd11: 2024-11-13T13:38:18,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38995 {}] regionserver.HRegion(8855): Flush requested on ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:38:18,888 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ef17de14e77e88401d31bb1a26d8cd11 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-13T13:38:18,892 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/968f712eb7544e3cb8c84697be72a2f0 is 1080, key is row0231/info:/1731505098864/Put/seqid=0 2024-11-13T13:38:18,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741873_1049 (size=17918) 2024-11-13T13:38:18,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741873_1049 (size=17918) 2024-11-13T13:38:18,897 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/968f712eb7544e3cb8c84697be72a2f0 2024-11-13T13:38:18,903 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/968f712eb7544e3cb8c84697be72a2f0 as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/968f712eb7544e3cb8c84697be72a2f0 2024-11-13T13:38:18,907 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/968f712eb7544e3cb8c84697be72a2f0, entries=12, sequenceid=315, filesize=17.5 K 2024-11-13T13:38:18,908 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for ef17de14e77e88401d31bb1a26d8cd11 in 20ms, sequenceid=315, compaction requested=true 2024-11-13T13:38:18,908 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for ef17de14e77e88401d31bb1a26d8cd11: 2024-11-13T13:38:18,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ef17de14e77e88401d31bb1a26d8cd11:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T13:38:18,909 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T13:38:18,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T13:38:18,910 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 211235 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T13:38:18,910 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1541): ef17de14e77e88401d31bb1a26d8cd11/info is initiating minor compaction (all files) 2024-11-13T13:38:18,910 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ef17de14e77e88401d31bb1a26d8cd11/info in TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11. 2024-11-13T13:38:18,910 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/a135b6077f1f412ab9ee394adc3c5159, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/878f1b6c384543f1955aec48d82bcd56, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/968f712eb7544e3cb8c84697be72a2f0] into tmpdir=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp, totalSize=206.3 K 2024-11-13T13:38:18,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38995 {}] regionserver.HRegion(8855): Flush requested on ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:38:18,910 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ef17de14e77e88401d31bb1a26d8cd11 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-13T13:38:18,910 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.Compactor(225): Compacting a135b6077f1f412ab9ee394adc3c5159, keycount=162, bloomtype=ROW, size=176.6 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1731505046351 2024-11-13T13:38:18,910 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.Compactor(225): Compacting 878f1b6c384543f1955aec48d82bcd56, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1731505096840 2024-11-13T13:38:18,911 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] compactions.Compactor(225): Compacting 968f712eb7544e3cb8c84697be72a2f0, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1731505098864 2024-11-13T13:38:18,914 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/e0d02d279f83469f91beb47fcb3e3580 is 1080, key is row0243/info:/1731505098889/Put/seqid=0 2024-11-13T13:38:18,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741874_1050 (size=17918) 2024-11-13T13:38:18,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741874_1050 (size=17918) 2024-11-13T13:38:18,923 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ef17de14e77e88401d31bb1a26d8cd11#info#compaction#89 average throughput is 61.91 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T13:38:18,924 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/83538c627ead4d10b68b97f3349367a1 is 1080, key is row0062/info:/1731505046351/Put/seqid=0 2024-11-13T13:38:18,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741875_1051 (size=201401) 2024-11-13T13:38:18,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741875_1051 (size=201401) 2024-11-13T13:38:18,934 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/83538c627ead4d10b68b97f3349367a1 as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/83538c627ead4d10b68b97f3349367a1 2024-11-13T13:38:18,939 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ef17de14e77e88401d31bb1a26d8cd11/info of ef17de14e77e88401d31bb1a26d8cd11 into 83538c627ead4d10b68b97f3349367a1(size=196.7 K), total size for store is 196.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
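The compaction selection just logged can be sanity-checked against the sizes the log itself reports. The short, self-contained Java sketch below is illustrative only (it is not HBase's ExploringCompactionPolicy code): it sums the three HFile sizes from the block reports above (180794, 12523 and 17918 bytes for a135b6077f1f..., 878f1b6c3845... and 968f712eb754...) and shows that they reproduce the "selected 3 files of size 211235" and "totalSize=206.3 K" figures in the compaction messages.

public class CompactionSelectionArithmetic {
    public static void main(String[] args) {
        // HFile sizes reported by the block reports above, in bytes.
        long[] hfileBytes = {180_794L, 12_523L, 17_918L};
        long selectedBytes = 0;
        for (long size : hfileBytes) {
            selectedBytes += size;
        }
        // Matches "selected 3 files of size 211235" from ExploringCompactionPolicy.
        System.out.println("selected bytes = " + selectedBytes);
        // Matches "totalSize=206.3 K" from HStore (211235 / 1024 is about 206.3).
        System.out.printf("totalSize = %.1f K%n", selectedBytes / 1024.0);
    }
}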
2024-11-13T13:38:18,939 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ef17de14e77e88401d31bb1a26d8cd11: 2024-11-13T13:38:18,939 INFO [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11., storeName=ef17de14e77e88401d31bb1a26d8cd11/info, priority=13, startTime=1731505098908; duration=0sec 2024-11-13T13:38:18,939 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T13:38:18,939 DEBUG [RS:0;bfeb2336aed7:38995-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ef17de14e77e88401d31bb1a26d8cd11:info 2024-11-13T13:38:19,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:38:19,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:38:19,320 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/e0d02d279f83469f91beb47fcb3e3580 2024-11-13T13:38:19,324 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/e0d02d279f83469f91beb47fcb3e3580 as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/e0d02d279f83469f91beb47fcb3e3580 2024-11-13T13:38:19,328 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/e0d02d279f83469f91beb47fcb3e3580, entries=12, sequenceid=330, filesize=17.5 K 2024-11-13T13:38:19,329 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=2.10 KB/2152 for ef17de14e77e88401d31bb1a26d8cd11 in 419ms, sequenceid=330, compaction requested=false 2024-11-13T13:38:19,329 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ef17de14e77e88401d31bb1a26d8cd11: 2024-11-13T13:38:19,409 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,409 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,409 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,409 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,410 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,410 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,410 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,411 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,443 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,443 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,443 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,443 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,443 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,443 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,446 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,446 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,447 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,448 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,955 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T13:38:19,956 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,956 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,956 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,956 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,993 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,997 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,997 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:19,997 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:20,000 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:20,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:20,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:38:20,915 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-13T13:38:20,915 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C38995%2C1731505032992.1731505100915 2024-11-13T13:38:20,932 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:20,932 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:20,932 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:20,932 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:20,932 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:20,932 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/WALs/bfeb2336aed7,38995,1731505032992/bfeb2336aed7%2C38995%2C1731505032992.1731505033611 with entries=313, filesize=308.61 KB; new WAL /user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/WALs/bfeb2336aed7,38995,1731505032992/bfeb2336aed7%2C38995%2C1731505032992.1731505100915 2024-11-13T13:38:20,934 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41091:41091),(127.0.0.1/127.0.0.1:41057:41057)] 2024-11-13T13:38:20,934 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/WALs/bfeb2336aed7,38995,1731505032992/bfeb2336aed7%2C38995%2C1731505032992.1731505033611 is not closed yet, will try archiving it next time 2024-11-13T13:38:20,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741833_1009 (size=316020) 2024-11-13T13:38:20,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741833_1009 (size=316020) 2024-11-13T13:38:20,936 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 0f561145611ba79a8558052544d3d0a2: 2024-11-13T13:38:20,936 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-11-13T13:38:20,940 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/hbase/meta/1588230740/.tmp/info/1ef22596286d4514bc13831b50ca5f3b is 186, key is TestLogRolling-testLogRolling,,1731505048821.0f561145611ba79a8558052544d3d0a2./info:regioninfo/1731505049565/Put/seqid=0 2024-11-13T13:38:20,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741877_1053 (size=6153) 2024-11-13T13:38:20,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741877_1053 (size=6153) 2024-11-13T13:38:20,945 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/hbase/meta/1588230740/.tmp/info/1ef22596286d4514bc13831b50ca5f3b 2024-11-13T13:38:20,951 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/hbase/meta/1588230740/.tmp/info/1ef22596286d4514bc13831b50ca5f3b as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/hbase/meta/1588230740/info/1ef22596286d4514bc13831b50ca5f3b 2024-11-13T13:38:20,957 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/hbase/meta/1588230740/info/1ef22596286d4514bc13831b50ca5f3b, entries=5, sequenceid=21, filesize=6.0 K 2024-11-13T13:38:20,958 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 22ms, sequenceid=21, compaction requested=false 2024-11-13T13:38:20,958 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-13T13:38:20,958 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing ef17de14e77e88401d31bb1a26d8cd11 1/1 column families, dataSize=2.10 KB heapSize=2.50 KB 2024-11-13T13:38:20,963 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/dcc1cc716ecc48489c1e5000d52a271c is 1080, key is row0255/info:/1731505098911/Put/seqid=0 2024-11-13T13:38:20,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741878_1054 (size=7116) 2024-11-13T13:38:20,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741878_1054 (size=7116) 2024-11-13T13:38:20,971 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.10 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/dcc1cc716ecc48489c1e5000d52a271c 2024-11-13T13:38:20,978 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/.tmp/info/dcc1cc716ecc48489c1e5000d52a271c as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/dcc1cc716ecc48489c1e5000d52a271c 2024-11-13T13:38:20,983 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/dcc1cc716ecc48489c1e5000d52a271c, entries=2, sequenceid=336, filesize=6.9 K 2024-11-13T13:38:20,985 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~2.10 KB/2152, heapSize ~2.48 KB/2544, currentSize=0 B/0 for ef17de14e77e88401d31bb1a26d8cd11 in 26ms, sequenceid=336, compaction requested=true 2024-11-13T13:38:20,985 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for ef17de14e77e88401d31bb1a26d8cd11: 2024-11-13T13:38:20,985 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 
bfeb2336aed7%2C38995%2C1731505032992.1731505100985 2024-11-13T13:38:20,993 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:20,993 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:20,993 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:20,994 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:20,994 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:20,994 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/WALs/bfeb2336aed7,38995,1731505032992/bfeb2336aed7%2C38995%2C1731505032992.1731505100915 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/WALs/bfeb2336aed7,38995,1731505032992/bfeb2336aed7%2C38995%2C1731505032992.1731505100985 2024-11-13T13:38:20,995 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41091:41091),(127.0.0.1/127.0.0.1:41057:41057)] 2024-11-13T13:38:20,995 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/WALs/bfeb2336aed7,38995,1731505032992/bfeb2336aed7%2C38995%2C1731505032992.1731505033611 is not closed yet, will try archiving it next time 2024-11-13T13:38:20,995 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/WALs/bfeb2336aed7,38995,1731505032992/bfeb2336aed7%2C38995%2C1731505032992.1731505100915 is not closed yet, will try archiving it next time 2024-11-13T13:38:20,996 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T13:38:20,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741876_1052 (size=731) 2024-11-13T13:38:20,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741876_1052 (size=731) 2024-11-13T13:38:20,996 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/WALs/bfeb2336aed7,38995,1731505032992/bfeb2336aed7%2C38995%2C1731505032992.1731505033611 is not closed yet, will try archiving it next time 2024-11-13T13:38:21,004 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/WALs/bfeb2336aed7,38995,1731505032992/bfeb2336aed7%2C38995%2C1731505032992.1731505100915 to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/oldWALs/bfeb2336aed7%2C38995%2C1731505032992.1731505100915 2024-11-13T13:38:21,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:21,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:21,336 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/WALs/bfeb2336aed7,38995,1731505032992/bfeb2336aed7%2C38995%2C1731505032992.1731505033611 to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/oldWALs/bfeb2336aed7%2C38995%2C1731505032992.1731505033611 2024-11-13T13:38:21,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T13:38:21,396 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-13T13:38:21,396 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at 
org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T13:38:21,397 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:38:21,397 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:38:21,397 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T13:38:21,397 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-13T13:38:21,397 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1557662830, stopped=false 2024-11-13T13:38:21,397 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=bfeb2336aed7,45355,1731505032764 2024-11-13T13:38:21,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38995-0x101346a09d80001, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T13:38:21,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T13:38:21,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:38:21,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38995-0x101346a09d80001, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:38:21,418 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T13:38:21,418 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
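Note on the two WARN entries from util.RecoverLeaseFSUtils(258) above: the trace shows the helper probing isFileClosed through java.lang.reflect.Method.invoke, so once the mini-cluster's DistributedFileSystem has been shut down, the IOException("Filesystem closed") thrown by DFSClient.checkOpen surfaces wrapped in an InvocationTargetException, which is exactly the "Caused by" chain printed in the log. A minimal, self-contained Java sketch of that wrapping behaviour (FakeFs and its method are illustrative stand-ins, not HBase or HDFS code):

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveCallDemo {
    // Stand-in for a filesystem whose client has already been closed.
    static class FakeFs {
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed"); // mirrors DFSClient.checkOpen()
        }
    }

    public static void main(String[] args) throws Exception {
        FakeFs fs = new FakeFs();
        Method m = FakeFs.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(fs, "/some/wal/file");
        } catch (InvocationTargetException e) {
            // e.getCause() is the original IOException, as in the log's "Caused by" line.
            System.out.println("wrapped: " + e.getCause());
        }
    }
}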
2024-11-13T13:38:21,418 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T13:38:21,418 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:38:21,419 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'bfeb2336aed7,38995,1731505032992' ***** 2024-11-13T13:38:21,419 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T13:38:21,419 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38995-0x101346a09d80001, quorum=127.0.0.1:54356, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T13:38:21,419 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T13:38:21,419 INFO [RS:0;bfeb2336aed7:38995 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T13:38:21,419 INFO [RS:0;bfeb2336aed7:38995 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T13:38:21,419 INFO [RS:0;bfeb2336aed7:38995 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T13:38:21,420 INFO [RS:0;bfeb2336aed7:38995 {}] regionserver.HRegionServer(3091): Received CLOSE for 0f561145611ba79a8558052544d3d0a2 2024-11-13T13:38:21,420 INFO [RS:0;bfeb2336aed7:38995 {}] regionserver.HRegionServer(3091): Received CLOSE for ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:38:21,420 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T13:38:21,420 INFO [RS:0;bfeb2336aed7:38995 {}] regionserver.HRegionServer(959): stopping server bfeb2336aed7,38995,1731505032992 2024-11-13T13:38:21,420 INFO [RS:0;bfeb2336aed7:38995 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T13:38:21,420 INFO [RS:0;bfeb2336aed7:38995 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;bfeb2336aed7:38995. 
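The paired "Connection has been closed by …" / "Call stack:" DEBUG entries above come from the connection close path capturing the caller's stack at close time (each dump starts at Thread.getStackTrace). A hedged sketch of that logging pattern, with hypothetical class and method names rather than HBase's AsyncConnectionImpl internals:

public final class TraceableClose {
    // Record which caller closed the resource, in the style of the DEBUG dumps above.
    public static void logCloseCallStack(String who) {
        StringBuilder sb = new StringBuilder("Connection has been closed by " + who + ". Call stack:");
        for (StackTraceElement frame : Thread.currentThread().getStackTrace()) {
            sb.append("\n  at ").append(frame);
        }
        System.out.println(sb); // real code would emit this through a DEBUG-level logger
    }
}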
2024-11-13T13:38:21,420 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 0f561145611ba79a8558052544d3d0a2, disabling compactions & flushes 2024-11-13T13:38:21,420 DEBUG [RS:0;bfeb2336aed7:38995 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T13:38:21,420 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731505048821.0f561145611ba79a8558052544d3d0a2. 2024-11-13T13:38:21,420 DEBUG [RS:0;bfeb2336aed7:38995 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:38:21,420 INFO [RS:0;bfeb2336aed7:38995 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T13:38:21,420 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731505048821.0f561145611ba79a8558052544d3d0a2. 2024-11-13T13:38:21,420 INFO [RS:0;bfeb2336aed7:38995 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T13:38:21,420 INFO [RS:0;bfeb2336aed7:38995 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-13T13:38:21,420 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731505048821.0f561145611ba79a8558052544d3d0a2. after waiting 0 ms 2024-11-13T13:38:21,420 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731505048821.0f561145611ba79a8558052544d3d0a2. 
2024-11-13T13:38:21,420 INFO [RS:0;bfeb2336aed7:38995 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T13:38:21,421 INFO [RS:0;bfeb2336aed7:38995 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-13T13:38:21,421 DEBUG [RS:0;bfeb2336aed7:38995 {}] regionserver.HRegionServer(1325): Online Regions={0f561145611ba79a8558052544d3d0a2=TestLogRolling-testLogRolling,,1731505048821.0f561145611ba79a8558052544d3d0a2., 1588230740=hbase:meta,,1.1588230740, ef17de14e77e88401d31bb1a26d8cd11=TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.} 2024-11-13T13:38:21,421 DEBUG [RS:0;bfeb2336aed7:38995 {}] regionserver.HRegionServer(1351): Waiting on 0f561145611ba79a8558052544d3d0a2, 1588230740, ef17de14e77e88401d31bb1a26d8cd11 2024-11-13T13:38:21,421 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T13:38:21,421 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T13:38:21,421 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T13:38:21,421 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T13:38:21,421 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T13:38:21,421 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731505048821.0f561145611ba79a8558052544d3d0a2.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/0f561145611ba79a8558052544d3d0a2/info/5ae30c907d014a109e71cc014ace382b.56afd5c2ff9ec93f3af082ccede46aee->hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/5ae30c907d014a109e71cc014ace382b-bottom] to archive 2024-11-13T13:38:21,422 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731505048821.0f561145611ba79a8558052544d3d0a2.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-13T13:38:21,424 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731505048821.0f561145611ba79a8558052544d3d0a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/0f561145611ba79a8558052544d3d0a2/info/5ae30c907d014a109e71cc014ace382b.56afd5c2ff9ec93f3af082ccede46aee to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/0f561145611ba79a8558052544d3d0a2/info/5ae30c907d014a109e71cc014ace382b.56afd5c2ff9ec93f3af082ccede46aee 2024-11-13T13:38:21,424 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731505048821.0f561145611ba79a8558052544d3d0a2.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=bfeb2336aed7:45355 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-13T13:38:21,424 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731505048821.0f561145611ba79a8558052544d3d0a2.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-13T13:38:21,425 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-13T13:38:21,426 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T13:38:21,426 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T13:38:21,426 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731505101421Running coprocessor pre-close hooks at 1731505101421Disabling compacts and flushes for region at 1731505101421Disabling writes for close at 1731505101421Writing region close event to WAL at 1731505101422 (+1 ms)Running coprocessor post-close hooks at 1731505101426 (+4 ms)Closed at 1731505101426 2024-11-13T13:38:21,426 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T13:38:21,427 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/0f561145611ba79a8558052544d3d0a2/recovered.edits/110.seqid, newMaxSeqId=110, maxSeqId=105 2024-11-13T13:38:21,428 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731505048821.0f561145611ba79a8558052544d3d0a2. 2024-11-13T13:38:21,428 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 0f561145611ba79a8558052544d3d0a2: Waiting for close lock at 1731505101420Running coprocessor pre-close hooks at 1731505101420Disabling compacts and flushes for region at 1731505101420Disabling writes for close at 1731505101420Writing region close event to WAL at 1731505101425 (+5 ms)Running coprocessor post-close hooks at 1731505101428 (+3 ms)Closed at 1731505101428 2024-11-13T13:38:21,428 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731505048821.0f561145611ba79a8558052544d3d0a2. 
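The "Region close journal for …" lines above render a sequence of named close steps with absolute timestamps and millisecond deltas such as "(+3 ms)". A small illustrative recorder in the same spirit (not HBase's actual implementation):

import java.util.ArrayList;
import java.util.List;

public class CloseJournal {
    private static final class Step {
        final String name;
        final long ts;
        Step(String name, long ts) { this.name = name; this.ts = ts; }
    }

    private final List<Step> steps = new ArrayList<>();

    // e.g. journal.record("Waiting for close lock"); journal.record("Writing region close event to WAL");
    public void record(String name) {
        steps.add(new Step(name, System.currentTimeMillis()));
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        long prev = -1;
        for (Step s : steps) {
            if (sb.length() > 0) sb.append(", ");
            sb.append(s.name).append(" at ").append(s.ts);
            if (prev >= 0 && s.ts > prev) {
                sb.append(" (+").append(s.ts - prev).append(" ms)"); // delta from previous step
            }
            prev = s.ts;
        }
        return sb.toString();
    }
}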
2024-11-13T13:38:21,428 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing ef17de14e77e88401d31bb1a26d8cd11, disabling compactions & flushes 2024-11-13T13:38:21,428 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11. 2024-11-13T13:38:21,428 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11. 2024-11-13T13:38:21,428 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11. after waiting 0 ms 2024-11-13T13:38:21,428 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11. 2024-11-13T13:38:21,429 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/5ae30c907d014a109e71cc014ace382b.56afd5c2ff9ec93f3af082ccede46aee->hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/56afd5c2ff9ec93f3af082ccede46aee/info/5ae30c907d014a109e71cc014ace382b-top, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/2c805ed796c8497e87b801daea923b63, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/TestLogRolling-testLogRolling=56afd5c2ff9ec93f3af082ccede46aee-da5c57994b174d6796355e16ac02b6e2, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/8af1dff1c79a42abac30c8d03f809e6a, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/a34603311abb4c34ac34edc6c8aef0cc, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/f095059102a34efab22e66fa44a331c6, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/54a53c3607bb45af9c52902fe868b73d, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/eb9fd19fb35c4a8ea6dcc9aa4216c544, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/c611315fa13c49acbf5562de6f2c0687, 
hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/b5d211440b15465991fca94e5f06dd55, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/86f66082177e44c6a006afd4f69f0cd3, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/d8d5a23485e64a53aff8a97c34224469, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/dec47b598cb949e5a1a46ba435b57e2c, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/07f0c591b3b6420583b4429fa3c33ec0, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/533adfa15338402d871f217c4d5114d2, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/a63bbd9561c8456b8f028e5f1601d6c5, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/584439100aa347d9977145b7016ab682, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/a1907788811a426ab867c8438b0513f3, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/c88fe52f660449c692e7335646b7fdf1, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/a135b6077f1f412ab9ee394adc3c5159, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/22cc96e3aa3b45c98c9242695dc9aed6, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/878f1b6c384543f1955aec48d82bcd56, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/968f712eb7544e3cb8c84697be72a2f0] to archive 2024-11-13T13:38:21,430 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
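The HFileArchiver entries that follow move each compacted store file from the region's data/ directory to the mirrored location under archive/data/, preserving the table/region/family layout (compare the source and destination paths in the DEBUG lines). A hedged sketch of that path mapping and move, using only the public Hadoop FileSystem API and assuming the "/data/" to "/archive/data/" substitution visible in this log; it is an illustration, not HBase's HFileArchiver:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveSketch {

    // Map .../data/<ns>/<table>/<region>/<family>/<file> to the mirrored archive path.
    public static Path archivedLocation(Path storeFile) {
        return new Path(storeFile.toString().replaceFirst("/data/", "/archive/data/"));
    }

    public static void archive(Configuration conf, Path storeFile) throws java.io.IOException {
        FileSystem fs = storeFile.getFileSystem(conf);
        Path target = archivedLocation(storeFile);
        fs.mkdirs(target.getParent());       // ensure the archive directory exists
        if (!fs.rename(storeFile, target)) { // move, do not copy
            throw new java.io.IOException("Failed to archive " + storeFile + " to " + target);
        }
    }
}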
2024-11-13T13:38:21,431 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/5ae30c907d014a109e71cc014ace382b.56afd5c2ff9ec93f3af082ccede46aee to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/5ae30c907d014a109e71cc014ace382b.56afd5c2ff9ec93f3af082ccede46aee 2024-11-13T13:38:21,432 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/2c805ed796c8497e87b801daea923b63 to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/2c805ed796c8497e87b801daea923b63 2024-11-13T13:38:21,434 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/TestLogRolling-testLogRolling=56afd5c2ff9ec93f3af082ccede46aee-da5c57994b174d6796355e16ac02b6e2 to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/TestLogRolling-testLogRolling=56afd5c2ff9ec93f3af082ccede46aee-da5c57994b174d6796355e16ac02b6e2 2024-11-13T13:38:21,435 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/8af1dff1c79a42abac30c8d03f809e6a to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/8af1dff1c79a42abac30c8d03f809e6a 2024-11-13T13:38:21,436 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/a34603311abb4c34ac34edc6c8aef0cc to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/a34603311abb4c34ac34edc6c8aef0cc 2024-11-13T13:38:21,437 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/f095059102a34efab22e66fa44a331c6 to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/f095059102a34efab22e66fa44a331c6 2024-11-13T13:38:21,439 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/54a53c3607bb45af9c52902fe868b73d to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/54a53c3607bb45af9c52902fe868b73d 2024-11-13T13:38:21,440 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/eb9fd19fb35c4a8ea6dcc9aa4216c544 to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/eb9fd19fb35c4a8ea6dcc9aa4216c544 2024-11-13T13:38:21,441 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/c611315fa13c49acbf5562de6f2c0687 to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/c611315fa13c49acbf5562de6f2c0687 2024-11-13T13:38:21,442 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/b5d211440b15465991fca94e5f06dd55 to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/b5d211440b15465991fca94e5f06dd55 2024-11-13T13:38:21,444 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/86f66082177e44c6a006afd4f69f0cd3 to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/86f66082177e44c6a006afd4f69f0cd3 2024-11-13T13:38:21,445 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/d8d5a23485e64a53aff8a97c34224469 to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/d8d5a23485e64a53aff8a97c34224469 2024-11-13T13:38:21,446 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/dec47b598cb949e5a1a46ba435b57e2c to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/dec47b598cb949e5a1a46ba435b57e2c 2024-11-13T13:38:21,447 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/07f0c591b3b6420583b4429fa3c33ec0 to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/07f0c591b3b6420583b4429fa3c33ec0 2024-11-13T13:38:21,448 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/533adfa15338402d871f217c4d5114d2 to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/533adfa15338402d871f217c4d5114d2 2024-11-13T13:38:21,449 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/a63bbd9561c8456b8f028e5f1601d6c5 to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/a63bbd9561c8456b8f028e5f1601d6c5 2024-11-13T13:38:21,449 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/584439100aa347d9977145b7016ab682 to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/584439100aa347d9977145b7016ab682 2024-11-13T13:38:21,450 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/a1907788811a426ab867c8438b0513f3 to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/a1907788811a426ab867c8438b0513f3 2024-11-13T13:38:21,451 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/c88fe52f660449c692e7335646b7fdf1 to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/c88fe52f660449c692e7335646b7fdf1 2024-11-13T13:38:21,452 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/a135b6077f1f412ab9ee394adc3c5159 to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/a135b6077f1f412ab9ee394adc3c5159 2024-11-13T13:38:21,453 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/22cc96e3aa3b45c98c9242695dc9aed6 to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/22cc96e3aa3b45c98c9242695dc9aed6 2024-11-13T13:38:21,454 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/878f1b6c384543f1955aec48d82bcd56 to hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/878f1b6c384543f1955aec48d82bcd56 2024-11-13T13:38:21,455 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/968f712eb7544e3cb8c84697be72a2f0 to 
hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/archive/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/info/968f712eb7544e3cb8c84697be72a2f0 2024-11-13T13:38:21,455 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [2c805ed796c8497e87b801daea923b63=26696, 8af1dff1c79a42abac30c8d03f809e6a=12509, a34603311abb4c34ac34edc6c8aef0cc=44066, f095059102a34efab22e66fa44a331c6=14663, 54a53c3607bb45af9c52902fe868b73d=17906, eb9fd19fb35c4a8ea6dcc9aa4216c544=74493, c611315fa13c49acbf5562de6f2c0687=22238, b5d211440b15465991fca94e5f06dd55=12516, 86f66082177e44c6a006afd4f69f0cd3=103705, d8d5a23485e64a53aff8a97c34224469=26550, dec47b598cb949e5a1a46ba435b57e2c=14672, 07f0c591b3b6420583b4429fa3c33ec0=126581, 533adfa15338402d871f217c4d5114d2=17906, a63bbd9561c8456b8f028e5f1601d6c5=19000, 584439100aa347d9977145b7016ab682=148316, a1907788811a426ab867c8438b0513f3=12516, c88fe52f660449c692e7335646b7fdf1=19011, a135b6077f1f412ab9ee394adc3c5159=180794, 22cc96e3aa3b45c98c9242695dc9aed6=23333, 878f1b6c384543f1955aec48d82bcd56=12523, 968f712eb7544e3cb8c84697be72a2f0=17918] 2024-11-13T13:38:21,458 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/data/default/TestLogRolling-testLogRolling/ef17de14e77e88401d31bb1a26d8cd11/recovered.edits/339.seqid, newMaxSeqId=339, maxSeqId=105 2024-11-13T13:38:21,458 INFO [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11. 2024-11-13T13:38:21,459 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for ef17de14e77e88401d31bb1a26d8cd11: Waiting for close lock at 1731505101428Running coprocessor pre-close hooks at 1731505101428Disabling compacts and flushes for region at 1731505101428Disabling writes for close at 1731505101428Writing region close event to WAL at 1731505101455 (+27 ms)Running coprocessor post-close hooks at 1731505101458 (+3 ms)Closed at 1731505101458 2024-11-13T13:38:21,459 DEBUG [RS_CLOSE_REGION-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731505048821.ef17de14e77e88401d31bb1a26d8cd11. 2024-11-13T13:38:21,480 INFO [regionserver/bfeb2336aed7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T13:38:21,534 INFO [regionserver/bfeb2336aed7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-13T13:38:21,534 INFO [regionserver/bfeb2336aed7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-13T13:38:21,621 INFO [RS:0;bfeb2336aed7:38995 {}] regionserver.HRegionServer(976): stopping server bfeb2336aed7,38995,1731505032992; all regions closed. 
2024-11-13T13:38:21,621 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:21,622 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:21,622 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:21,622 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:21,622 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:21,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741834_1010 (size=8107) 2024-11-13T13:38:21,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741834_1010 (size=8107) 2024-11-13T13:38:21,626 DEBUG [RS:0;bfeb2336aed7:38995 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/oldWALs 2024-11-13T13:38:21,626 INFO [RS:0;bfeb2336aed7:38995 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bfeb2336aed7%2C38995%2C1731505032992.meta:.meta(num 1731505034012) 2024-11-13T13:38:21,626 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:21,626 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:21,626 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:21,626 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:21,626 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:21,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741879_1055 (size=778) 2024-11-13T13:38:21,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741879_1055 (size=778) 2024-11-13T13:38:21,630 DEBUG [RS:0;bfeb2336aed7:38995 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/oldWALs 2024-11-13T13:38:21,630 INFO [RS:0;bfeb2336aed7:38995 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bfeb2336aed7%2C38995%2C1731505032992:(num 1731505100985) 2024-11-13T13:38:21,630 DEBUG [RS:0;bfeb2336aed7:38995 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:38:21,630 INFO [RS:0;bfeb2336aed7:38995 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T13:38:21,630 INFO [RS:0;bfeb2336aed7:38995 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T13:38:21,631 INFO [RS:0;bfeb2336aed7:38995 {}] hbase.ChoreService(370): Chore service for: regionserver/bfeb2336aed7:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-13T13:38:21,631 INFO [RS:0;bfeb2336aed7:38995 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T13:38:21,631 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-13T13:38:21,631 INFO [RS:0;bfeb2336aed7:38995 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38995 2024-11-13T13:38:21,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38995-0x101346a09d80001, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bfeb2336aed7,38995,1731505032992 2024-11-13T13:38:21,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T13:38:21,649 INFO [RS:0;bfeb2336aed7:38995 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T13:38:21,660 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bfeb2336aed7,38995,1731505032992] 2024-11-13T13:38:21,670 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/bfeb2336aed7,38995,1731505032992 already deleted, retry=false 2024-11-13T13:38:21,670 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; bfeb2336aed7,38995,1731505032992 expired; onlineServers=0 2024-11-13T13:38:21,670 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'bfeb2336aed7,45355,1731505032764' ***** 2024-11-13T13:38:21,670 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-13T13:38:21,670 INFO [M:0;bfeb2336aed7:45355 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T13:38:21,671 INFO [M:0;bfeb2336aed7:45355 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T13:38:21,671 DEBUG [M:0;bfeb2336aed7:45355 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-13T13:38:21,671 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
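The zookeeper.ZKWatcher entries above (and the earlier "Set watcher on znode that does not yet exist, /hbase/running", followed by NodeDeleted events) reflect the standard one-shot-watch pattern: exists() both tests a znode and arms a watch even when the node is absent. A minimal sketch with the plain ZooKeeper client API; the method name and baseZNode argument are illustrative, not HBase's ZKUtil:

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZkWatchSketch {
    public static void watchRunningZnode(ZooKeeper zk, String baseZNode) throws Exception {
        String path = baseZNode + "/running";
        Watcher watcher = (WatchedEvent event) ->
                System.out.println("Received ZooKeeper Event, type=" + event.getType()
                        + ", state=" + event.getState() + ", path=" + event.getPath());
        // exists() returns null when the znode is absent but still sets the one-shot watch.
        if (zk.exists(path, watcher) == null) {
            System.out.println("Set watcher on znode that does not yet exist, " + path);
        }
    }
}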
2024-11-13T13:38:21,671 DEBUG [M:0;bfeb2336aed7:45355 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T13:38:21,671 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.small.0-1731505033358 {}] cleaner.HFileCleaner(306): Exit Thread[master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.small.0-1731505033358,5,FailOnTimeoutGroup] 2024-11-13T13:38:21,671 INFO [M:0;bfeb2336aed7:45355 {}] hbase.ChoreService(370): Chore service for: master/bfeb2336aed7:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T13:38:21,671 INFO [M:0;bfeb2336aed7:45355 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T13:38:21,671 DEBUG [M:0;bfeb2336aed7:45355 {}] master.HMaster(1795): Stopping service threads 2024-11-13T13:38:21,671 INFO [M:0;bfeb2336aed7:45355 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T13:38:21,671 INFO [M:0;bfeb2336aed7:45355 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T13:38:21,671 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.large.0-1731505033358 {}] cleaner.HFileCleaner(306): Exit Thread[master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.large.0-1731505033358,5,FailOnTimeoutGroup] 2024-11-13T13:38:21,671 INFO [M:0;bfeb2336aed7:45355 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T13:38:21,671 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-13T13:38:21,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T13:38:21,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:38:21,691 DEBUG [M:0;bfeb2336aed7:45355 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-13T13:38:21,691 DEBUG [M:0;bfeb2336aed7:45355 {}] master.ActiveMasterManager(353): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-13T13:38:21,692 INFO [M:0;bfeb2336aed7:45355 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/.lastflushedseqids 2024-11-13T13:38:21,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741880_1056 (size=228) 2024-11-13T13:38:21,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741880_1056 (size=228) 2024-11-13T13:38:21,701 INFO [M:0;bfeb2336aed7:45355 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T13:38:21,701 INFO [M:0;bfeb2336aed7:45355 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T13:38:21,701 DEBUG [M:0;bfeb2336aed7:45355 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 
2024-11-13T13:38:21,701 INFO [M:0;bfeb2336aed7:45355 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:38:21,701 DEBUG [M:0;bfeb2336aed7:45355 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:38:21,701 DEBUG [M:0;bfeb2336aed7:45355 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T13:38:21,701 DEBUG [M:0;bfeb2336aed7:45355 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:38:21,701 INFO [M:0;bfeb2336aed7:45355 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.41 KB heapSize=63.33 KB 2024-11-13T13:38:21,717 DEBUG [M:0;bfeb2336aed7:45355 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a66e6c65a5ab4e118496bb006279377e is 82, key is hbase:meta,,1/info:regioninfo/1731505034037/Put/seqid=0 2024-11-13T13:38:21,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741881_1057 (size=5672) 2024-11-13T13:38:21,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741881_1057 (size=5672) 2024-11-13T13:38:21,722 INFO [M:0;bfeb2336aed7:45355 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a66e6c65a5ab4e118496bb006279377e 2024-11-13T13:38:21,740 DEBUG [M:0;bfeb2336aed7:45355 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c9bad09c1694421ca29738a349603046 is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731505034508/Put/seqid=0 2024-11-13T13:38:21,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741882_1058 (size=7089) 2024-11-13T13:38:21,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741882_1058 (size=7089) 2024-11-13T13:38:21,745 INFO [M:0;bfeb2336aed7:45355 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.80 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c9bad09c1694421ca29738a349603046 2024-11-13T13:38:21,749 INFO [M:0;bfeb2336aed7:45355 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c9bad09c1694421ca29738a349603046 2024-11-13T13:38:21,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38995-0x101346a09d80001, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T13:38:21,760 INFO [RS:0;bfeb2336aed7:38995 {}] hbase.HBaseServerBase(486): Close table descriptors 
2024-11-13T13:38:21,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38995-0x101346a09d80001, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T13:38:21,760 INFO [RS:0;bfeb2336aed7:38995 {}] regionserver.HRegionServer(1031): Exiting; stopping=bfeb2336aed7,38995,1731505032992; zookeeper connection closed. 2024-11-13T13:38:21,760 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2e296537 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2e296537 2024-11-13T13:38:21,761 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-13T13:38:21,762 DEBUG [M:0;bfeb2336aed7:45355 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2bb9d9ef628b4eacb7fac8c80b411e79 is 69, key is bfeb2336aed7,38995,1731505032992/rs:state/1731505033454/Put/seqid=0 2024-11-13T13:38:21,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741883_1059 (size=5156) 2024-11-13T13:38:21,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741883_1059 (size=5156) 2024-11-13T13:38:21,768 INFO [M:0;bfeb2336aed7:45355 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2bb9d9ef628b4eacb7fac8c80b411e79 2024-11-13T13:38:21,787 DEBUG [M:0;bfeb2336aed7:45355 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b2532ce69dde4420b4c8c590da5e5f06 is 52, key is load_balancer_on/state:d/1731505034124/Put/seqid=0 2024-11-13T13:38:21,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741884_1060 (size=5056) 2024-11-13T13:38:21,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741884_1060 (size=5056) 2024-11-13T13:38:21,792 INFO [M:0;bfeb2336aed7:45355 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b2532ce69dde4420b4c8c590da5e5f06 2024-11-13T13:38:21,799 DEBUG [M:0;bfeb2336aed7:45355 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a66e6c65a5ab4e118496bb006279377e as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a66e6c65a5ab4e118496bb006279377e 2024-11-13T13:38:21,804 INFO [M:0;bfeb2336aed7:45355 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a66e6c65a5ab4e118496bb006279377e, entries=8, sequenceid=125, filesize=5.5 K 2024-11-13T13:38:21,805 DEBUG [M:0;bfeb2336aed7:45355 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c9bad09c1694421ca29738a349603046 as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c9bad09c1694421ca29738a349603046 2024-11-13T13:38:21,809 INFO [M:0;bfeb2336aed7:45355 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c9bad09c1694421ca29738a349603046 2024-11-13T13:38:21,809 INFO [M:0;bfeb2336aed7:45355 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c9bad09c1694421ca29738a349603046, entries=13, sequenceid=125, filesize=6.9 K 2024-11-13T13:38:21,811 DEBUG [M:0;bfeb2336aed7:45355 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2bb9d9ef628b4eacb7fac8c80b411e79 as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2bb9d9ef628b4eacb7fac8c80b411e79 2024-11-13T13:38:21,816 INFO [M:0;bfeb2336aed7:45355 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2bb9d9ef628b4eacb7fac8c80b411e79, entries=1, sequenceid=125, filesize=5.0 K 2024-11-13T13:38:21,820 DEBUG [M:0;bfeb2336aed7:45355 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b2532ce69dde4420b4c8c590da5e5f06 as hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b2532ce69dde4420b4c8c590da5e5f06 2024-11-13T13:38:21,827 INFO [M:0;bfeb2336aed7:45355 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/d305b1c0-0c4b-0dfd-0bb5-9af50ca9182d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b2532ce69dde4420b4c8c590da5e5f06, entries=1, sequenceid=125, filesize=4.9 K 2024-11-13T13:38:21,827 INFO [M:0;bfeb2336aed7:45355 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.41 KB/52639, heapSize ~63.27 KB/64784, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 126ms, sequenceid=125, compaction requested=false 2024-11-13T13:38:21,829 INFO [M:0;bfeb2336aed7:45355 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T13:38:21,829 DEBUG [M:0;bfeb2336aed7:45355 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731505101701Disabling compacts and flushes for region at 1731505101701Disabling writes for close at 1731505101701Obtaining lock to block concurrent updates at 1731505101701Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731505101701Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52639, getHeapSize=64784, getOffHeapSize=0, getCellsCount=148 at 1731505101702 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731505101702Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731505101702Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731505101717 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731505101717Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731505101726 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731505101739 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731505101739Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731505101749 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731505101762 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731505101762Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731505101773 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731505101787 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731505101787Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@258fd375: reopening flushed file at 1731505101798 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5e912c57: reopening flushed file at 1731505101804 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@19dab62a: reopening flushed file at 1731505101810 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3d261ea8: reopening flushed file at 1731505101816 (+6 ms)Finished flush of dataSize ~51.41 KB/52639, heapSize ~63.27 KB/64784, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 126ms, sequenceid=125, compaction requested=false at 1731505101827 (+11 ms)Writing region close event to WAL at 1731505101829 (+2 ms)Closed at 1731505101829 2024-11-13T13:38:21,832 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:21,832 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:21,832 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:21,833 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:21,833 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:21,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40741 is added to blk_1073741830_1006 (size=61308) 2024-11-13T13:38:21,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33351 is added to blk_1073741830_1006 (size=61308) 2024-11-13T13:38:21,836 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-13T13:38:21,836 INFO [M:0;bfeb2336aed7:45355 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-13T13:38:21,836 INFO [M:0;bfeb2336aed7:45355 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45355 2024-11-13T13:38:21,836 INFO [M:0;bfeb2336aed7:45355 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T13:38:21,948 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T13:38:21,948 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T13:38:21,948 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-13T13:38:21,948 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-13T13:38:21,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T13:38:21,949 INFO [M:0;bfeb2336aed7:45355 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T13:38:21,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45355-0x101346a09d80000, quorum=127.0.0.1:54356, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T13:38:21,953 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@72925ee1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:38:21,953 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6f4aa33e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T13:38:21,953 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T13:38:21,953 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@286b8c80{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T13:38:21,954 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a5db76d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/hadoop.log.dir/,STOPPED} 2024-11-13T13:38:21,955 WARN [BP-521197480-172.17.0.2-1731505030550 heartbeating to localhost/127.0.0.1:41599 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T13:38:21,955 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T13:38:21,955 WARN [BP-521197480-172.17.0.2-1731505030550 heartbeating to localhost/127.0.0.1:41599 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-521197480-172.17.0.2-1731505030550 (Datanode Uuid d1b6e1b6-6758-48f3-bd11-c93d7e409ab8) service to localhost/127.0.0.1:41599 2024-11-13T13:38:21,955 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T13:38:21,956 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/cluster_4c473c0e-5978-a336-d575-91d8f4138688/data/data3/current/BP-521197480-172.17.0.2-1731505030550 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:38:21,956 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/cluster_4c473c0e-5978-a336-d575-91d8f4138688/data/data4/current/BP-521197480-172.17.0.2-1731505030550 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:38:21,956 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T13:38:21,969 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3e4bbe36{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:38:21,969 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@601b78f7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T13:38:21,969 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T13:38:21,970 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c0c72e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T13:38:21,970 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5551c062{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/hadoop.log.dir/,STOPPED} 2024-11-13T13:38:21,971 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T13:38:21,971 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T13:38:21,971 WARN [BP-521197480-172.17.0.2-1731505030550 heartbeating to localhost/127.0.0.1:41599 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T13:38:21,971 WARN [BP-521197480-172.17.0.2-1731505030550 heartbeating to localhost/127.0.0.1:41599 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-521197480-172.17.0.2-1731505030550 (Datanode Uuid 21c33374-c183-492e-ad31-1675c2231117) service to localhost/127.0.0.1:41599 2024-11-13T13:38:21,972 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/cluster_4c473c0e-5978-a336-d575-91d8f4138688/data/data1/current/BP-521197480-172.17.0.2-1731505030550 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:38:21,972 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/cluster_4c473c0e-5978-a336-d575-91d8f4138688/data/data2/current/BP-521197480-172.17.0.2-1731505030550 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:38:21,972 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T13:38:21,979 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@652d6e37{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T13:38:21,980 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@444d0b71{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T13:38:21,980 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T13:38:21,980 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@26881465{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T13:38:21,980 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4881a2ed{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/hadoop.log.dir/,STOPPED} 2024-11-13T13:38:21,989 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-13T13:38:22,028 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-13T13:38:22,037 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=230 (was 210) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41599 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41599 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41599 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41599 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41599 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41599 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:41599 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41599 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=512 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=174 (was 285), ProcessCount=11 (was 11), AvailableMemoryMB=4300 (was 3252) - AvailableMemoryMB LEAK? 
- 2024-11-13T13:38:22,045 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=230, OpenFileDescriptor=512, MaxFileDescriptor=1048576, SystemLoadAverage=174, ProcessCount=11, AvailableMemoryMB=4300 2024-11-13T13:38:22,046 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-13T13:38:22,046 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/hadoop.log.dir so I do NOT create it in target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83 2024-11-13T13:38:22,046 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/20aa6c7e-d63c-e9d6-7376-290e0252652e/hadoop.tmp.dir so I do NOT create it in target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83 2024-11-13T13:38:22,046 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/cluster_6b760dc0-6066-0ecf-7b1d-b58b7fe4d93c, deleteOnExit=true 2024-11-13T13:38:22,046 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-13T13:38:22,046 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/test.cache.data in system properties and HBase conf 2024-11-13T13:38:22,046 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T13:38:22,046 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/hadoop.log.dir in system properties and HBase conf 2024-11-13T13:38:22,046 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T13:38:22,046 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T13:38:22,046 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T13:38:22,046 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-13T13:38:22,046 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T13:38:22,046 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T13:38:22,047 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T13:38:22,047 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T13:38:22,047 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T13:38:22,047 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T13:38:22,047 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T13:38:22,047 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T13:38:22,047 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-13T13:38:22,047 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/nfs.dump.dir in system properties and HBase conf 2024-11-13T13:38:22,047 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/java.io.tmpdir in system properties and HBase conf 2024-11-13T13:38:22,047 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T13:38:22,047 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T13:38:22,047 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T13:38:22,059 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T13:38:22,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:22,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:22,501 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:38:22,504 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T13:38:22,505 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T13:38:22,505 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T13:38:22,505 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T13:38:22,506 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:38:22,506 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@717a950c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/hadoop.log.dir/,AVAILABLE} 2024-11-13T13:38:22,506 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35d31c30{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T13:38:22,597 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@35c58925{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/java.io.tmpdir/jetty-localhost-36943-hadoop-hdfs-3_4_1-tests_jar-_-any-148862991943245020/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T13:38:22,598 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@69a2ae1b{HTTP/1.1, (http/1.1)}{localhost:36943} 2024-11-13T13:38:22,598 INFO [Time-limited test {}] server.Server(415): Started @317285ms 2024-11-13T13:38:22,609 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T13:38:22,848 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:38:22,850 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T13:38:22,852 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T13:38:22,852 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T13:38:22,852 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T13:38:22,853 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@413a6699{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/hadoop.log.dir/,AVAILABLE} 2024-11-13T13:38:22,853 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@b0e389f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T13:38:22,950 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7fcd61c6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/java.io.tmpdir/jetty-localhost-33653-hadoop-hdfs-3_4_1-tests_jar-_-any-13996519068330305683/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:38:22,950 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2ec1c28e{HTTP/1.1, (http/1.1)}{localhost:33653} 2024-11-13T13:38:22,950 INFO [Time-limited test {}] server.Server(415): Started @317638ms 2024-11-13T13:38:22,951 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T13:38:22,983 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T13:38:22,986 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T13:38:22,987 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T13:38:22,988 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T13:38:22,988 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T13:38:22,988 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64440bf6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/hadoop.log.dir/,AVAILABLE} 2024-11-13T13:38:22,989 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@44402286{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T13:38:23,111 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@49e6dd92{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/java.io.tmpdir/jetty-localhost-34373-hadoop-hdfs-3_4_1-tests_jar-_-any-6083562465208743253/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:38:23,111 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@403020f8{HTTP/1.1, (http/1.1)}{localhost:34373} 2024-11-13T13:38:23,111 INFO [Time-limited test {}] server.Server(415): Started @317799ms 2024-11-13T13:38:23,113 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T13:38:23,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:23,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:38:24,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:24,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:24,306 WARN [Thread-2493 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/cluster_6b760dc0-6066-0ecf-7b1d-b58b7fe4d93c/data/data1/current/BP-1903793704-172.17.0.2-1731505102063/current, will proceed with Du for space computation calculation, 2024-11-13T13:38:24,306 WARN [Thread-2494 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/cluster_6b760dc0-6066-0ecf-7b1d-b58b7fe4d93c/data/data2/current/BP-1903793704-172.17.0.2-1731505102063/current, will proceed with Du for space computation calculation, 2024-11-13T13:38:24,321 WARN [Thread-2457 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T13:38:24,324 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x36013c910ec241ee with lease ID 0xd8149609a145432a: Processing first storage report for DS-5be5c1ae-be4e-4378-b835-194a6a875b57 from datanode DatanodeRegistration(127.0.0.1:38931, datanodeUuid=21cbae6f-4761-4b70-b554-95e162128341, infoPort=44213, infoSecurePort=0, ipcPort=41745, storageInfo=lv=-57;cid=testClusterID;nsid=1167155367;c=1731505102063) 2024-11-13T13:38:24,324 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x36013c910ec241ee with lease ID 0xd8149609a145432a: from storage DS-5be5c1ae-be4e-4378-b835-194a6a875b57 node DatanodeRegistration(127.0.0.1:38931, datanodeUuid=21cbae6f-4761-4b70-b554-95e162128341, infoPort=44213, infoSecurePort=0, ipcPort=41745, storageInfo=lv=-57;cid=testClusterID;nsid=1167155367;c=1731505102063), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:38:24,324 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x36013c910ec241ee with lease ID 0xd8149609a145432a: Processing first storage report for DS-9e67b39d-98de-4c89-bc24-e0cc9cd931e8 from datanode DatanodeRegistration(127.0.0.1:38931, datanodeUuid=21cbae6f-4761-4b70-b554-95e162128341, infoPort=44213, infoSecurePort=0, ipcPort=41745, storageInfo=lv=-57;cid=testClusterID;nsid=1167155367;c=1731505102063) 2024-11-13T13:38:24,324 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x36013c910ec241ee with lease ID 0xd8149609a145432a: from storage DS-9e67b39d-98de-4c89-bc24-e0cc9cd931e8 node DatanodeRegistration(127.0.0.1:38931, datanodeUuid=21cbae6f-4761-4b70-b554-95e162128341, infoPort=44213, infoSecurePort=0, ipcPort=41745, storageInfo=lv=-57;cid=testClusterID;nsid=1167155367;c=1731505102063), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:38:25,115 WARN [Thread-2504 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/cluster_6b760dc0-6066-0ecf-7b1d-b58b7fe4d93c/data/data3/current/BP-1903793704-172.17.0.2-1731505102063/current, will proceed with Du for space computation calculation, 2024-11-13T13:38:25,116 WARN [Thread-2505 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/cluster_6b760dc0-6066-0ecf-7b1d-b58b7fe4d93c/data/data4/current/BP-1903793704-172.17.0.2-1731505102063/current, will proceed with Du for space computation calculation, 2024-11-13T13:38:25,134 WARN [Thread-2480 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T13:38:25,136 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x96eebe16d3f97cfb with lease ID 0xd8149609a145432b: Processing first storage report for DS-f1f0466d-aa63-436b-b4ad-638e1bda7533 from datanode DatanodeRegistration(127.0.0.1:37879, datanodeUuid=26f69e6d-685a-4c7c-9eaa-cccb4cbd1612, infoPort=44807, infoSecurePort=0, ipcPort=40775, storageInfo=lv=-57;cid=testClusterID;nsid=1167155367;c=1731505102063) 2024-11-13T13:38:25,136 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x96eebe16d3f97cfb with lease ID 0xd8149609a145432b: from storage DS-f1f0466d-aa63-436b-b4ad-638e1bda7533 node DatanodeRegistration(127.0.0.1:37879, datanodeUuid=26f69e6d-685a-4c7c-9eaa-cccb4cbd1612, infoPort=44807, infoSecurePort=0, ipcPort=40775, storageInfo=lv=-57;cid=testClusterID;nsid=1167155367;c=1731505102063), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:38:25,136 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x96eebe16d3f97cfb with lease ID 0xd8149609a145432b: Processing first storage report for DS-cbebc8ba-7290-4d0f-ba8b-cd05e24d0fb7 from datanode DatanodeRegistration(127.0.0.1:37879, datanodeUuid=26f69e6d-685a-4c7c-9eaa-cccb4cbd1612, infoPort=44807, infoSecurePort=0, ipcPort=40775, storageInfo=lv=-57;cid=testClusterID;nsid=1167155367;c=1731505102063) 2024-11-13T13:38:25,136 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x96eebe16d3f97cfb with lease ID 0xd8149609a145432b: from storage DS-cbebc8ba-7290-4d0f-ba8b-cd05e24d0fb7 node DatanodeRegistration(127.0.0.1:37879, datanodeUuid=26f69e6d-685a-4c7c-9eaa-cccb4cbd1612, infoPort=44807, infoSecurePort=0, ipcPort=40775, storageInfo=lv=-57;cid=testClusterID;nsid=1167155367;c=1731505102063), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T13:38:25,163 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83 2024-11-13T13:38:25,165 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/cluster_6b760dc0-6066-0ecf-7b1d-b58b7fe4d93c/zookeeper_0, clientPort=58622, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/cluster_6b760dc0-6066-0ecf-7b1d-b58b7fe4d93c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/cluster_6b760dc0-6066-0ecf-7b1d-b58b7fe4d93c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T13:38:25,166 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58622 2024-11-13T13:38:25,166 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:38:25,167 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:38:25,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741825_1001 (size=7) 2024-11-13T13:38:25,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37879 is added to blk_1073741825_1001 (size=7) 2024-11-13T13:38:25,175 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416 with version=8 2024-11-13T13:38:25,175 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38779/user/jenkins/test-data/52d71c14-a597-0de0-63d6-b75f134ce0ba/hbase-staging 2024-11-13T13:38:25,177 INFO [Time-limited test {}] client.ConnectionUtils(128): master/bfeb2336aed7:0 server-side Connection retries=45 2024-11-13T13:38:25,177 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T13:38:25,177 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T13:38:25,177 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T13:38:25,177 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T13:38:25,177 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T13:38:25,177 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T13:38:25,177 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T13:38:25,178 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38711 2024-11-13T13:38:25,178 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38711 connecting to ZooKeeper ensemble=127.0.0.1:58622 2024-11-13T13:38:25,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:25,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:25,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:387110x0, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T13:38:25,354 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38711-0x101346b24b80000 connected 2024-11-13T13:38:25,439 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:38:25,440 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:38:25,442 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T13:38:25,442 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416, hbase.cluster.distributed=false 2024-11-13T13:38:25,444 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T13:38:25,445 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38711 2024-11-13T13:38:25,446 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38711 2024-11-13T13:38:25,448 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38711 2024-11-13T13:38:25,449 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38711 2024-11-13T13:38:25,449 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38711 2024-11-13T13:38:25,466 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/bfeb2336aed7:0 server-side Connection retries=45 2024-11-13T13:38:25,466 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T13:38:25,466 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T13:38:25,466 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T13:38:25,466 INFO 
[Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T13:38:25,466 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T13:38:25,466 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T13:38:25,466 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T13:38:25,467 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33057 2024-11-13T13:38:25,468 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33057 connecting to ZooKeeper ensemble=127.0.0.1:58622 2024-11-13T13:38:25,468 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:38:25,470 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:38:25,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:330570x0, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T13:38:25,481 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:330570x0, quorum=127.0.0.1:58622, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T13:38:25,482 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33057-0x101346b24b80001 connected 2024-11-13T13:38:25,482 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T13:38:25,484 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T13:38:25,485 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33057-0x101346b24b80001, quorum=127.0.0.1:58622, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T13:38:25,486 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33057-0x101346b24b80001, quorum=127.0.0.1:58622, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T13:38:25,488 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33057 2024-11-13T13:38:25,489 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33057 2024-11-13T13:38:25,490 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33057 2024-11-13T13:38:25,492 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33057 2024-11-13T13:38:25,496 DEBUG [Time-limited test 
{}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33057 2024-11-13T13:38:25,511 DEBUG [M:0;bfeb2336aed7:38711 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;bfeb2336aed7:38711 2024-11-13T13:38:25,512 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/bfeb2336aed7,38711,1731505105176 2024-11-13T13:38:25,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33057-0x101346b24b80001, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T13:38:25,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T13:38:25,524 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/bfeb2336aed7,38711,1731505105176 2024-11-13T13:38:25,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33057-0x101346b24b80001, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T13:38:25,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:38:25,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33057-0x101346b24b80001, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:38:25,534 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T13:38:25,534 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/bfeb2336aed7,38711,1731505105176 from backup master directory 2024-11-13T13:38:25,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33057-0x101346b24b80001, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T13:38:25,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/bfeb2336aed7,38711,1731505105176 2024-11-13T13:38:25,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T13:38:25,544 WARN [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
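The ZKWatcher/ZKUtil lines above show the usual ZooKeeper pattern this test relies on: register an existence watch on a znode that may not exist yet (/hbase/master, /hbase/running, /hbase/backup-masters/...) and then react to the NodeCreated/NodeDeleted/NodeChildrenChanged events that follow. Below is a minimal sketch using the plain ZooKeeper client, with the ensemble address taken from the log; it is illustrative only and is not the HBase ZKWatcher implementation itself.

    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class MasterZNodeWatchSketch {
        public static void main(String[] args) throws Exception {
            // Connect string and session timeout mirror the test log; any running ensemble works.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:58622", 30000,
                    event -> System.out.println("Received ZooKeeper Event, type=" + event.getType()
                            + ", state=" + event.getState() + ", path=" + event.getPath()));

            // exists() with watch=true registers a watch even when the znode is absent,
            // which is what ZKUtil logs as "Set watcher on znode that does not yet exist".
            Stat stat = zk.exists("/hbase/master", true);
            System.out.println("/hbase/master " + (stat == null ? "does not exist yet" : "exists"));

            Thread.sleep(60_000); // keep the session open long enough to observe NodeCreated
            zk.close();
        }
    }

Once another process creates /hbase/master, the watcher above fires a single NodeCreated event, matching the one-shot watch semantics seen in the ZKWatcher lines.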
2024-11-13T13:38:25,544 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=bfeb2336aed7,38711,1731505105176 2024-11-13T13:38:25,549 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/hbase.id] with ID: a4df24fa-1db4-41f2-b6e9-ecdd955782ee 2024-11-13T13:38:25,549 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/.tmp/hbase.id 2024-11-13T13:38:25,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741826_1002 (size=42) 2024-11-13T13:38:25,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37879 is added to blk_1073741826_1002 (size=42) 2024-11-13T13:38:25,560 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/.tmp/hbase.id]:[hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/hbase.id] 2024-11-13T13:38:25,570 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:38:25,570 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-13T13:38:25,571 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
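The cluster ID handling above (write the file under a .tmp location, then move it to its target) is the common HDFS write-then-rename pattern, used so readers never observe a partially written file. The following is a rough sketch against the generic Hadoop FileSystem API; the helper name and the example path are hypothetical and do not reproduce the FSUtils internals.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WriteThenRenameSketch {
        // Hypothetical helper illustrating the temp-file-then-rename pattern seen above.
        static void writeClusterId(FileSystem fs, Path finalPath, String clusterId) throws Exception {
            Path tmp = new Path(finalPath.getParent(), ".tmp/" + finalPath.getName());
            try (FSDataOutputStream out = fs.create(tmp, true)) {
                out.write(clusterId.getBytes("UTF-8")); // write the content to a temporary location first
            }
            // Move into place in one step so a reader either sees the full file or nothing.
            if (!fs.rename(tmp, finalPath)) {
                throw new java.io.IOException("rename failed: " + tmp + " -> " + finalPath);
            }
        }

        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // NameNode address taken from the log; the target path here is only an example.
            FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:35503"), conf);
            writeClusterId(fs, new Path("/tmp/example/hbase.id"), "example-cluster-id");
        }
    }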
2024-11-13T13:38:25,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:38:25,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33057-0x101346b24b80001, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:38:25,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37879 is added to blk_1073741827_1003 (size=196) 2024-11-13T13:38:25,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741827_1003 (size=196) 2024-11-13T13:38:25,667 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T13:38:25,667 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T13:38:25,668 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T13:38:25,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37879 is added to blk_1073741828_1004 (size=1189) 2024-11-13T13:38:25,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741828_1004 (size=1189) 2024-11-13T13:38:25,677 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/MasterData/data/master/store 2024-11-13T13:38:25,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741829_1005 (size=34) 2024-11-13T13:38:25,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37879 is added to blk_1073741829_1005 (size=34) 2024-11-13T13:38:25,684 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:38:25,684 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T13:38:25,684 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:38:25,684 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:38:25,684 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T13:38:25,684 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:38:25,684 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
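The master:store descriptor dumped above lists, per column family, the same attributes that the public ColumnFamilyDescriptorBuilder API exposes (versions, data block encoding, bloom filter type, in-memory flag, block size). Below is a sketch that builds an equivalent descriptor for the 'info' family from the logged values; this is an assumption about how such a descriptor could be expressed with the client API, not how MasterRegion actually constructs its table.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
        public static void main(String[] args) {
            // Mirrors the 'info' family attributes printed in the log:
            // VERSIONS=3, DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOOMFILTER=ROWCOL,
            // IN_MEMORY=true, BLOCKSIZE=8192.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .setBloomFilterType(BloomType.ROWCOL)
                    .setInMemory(true)
                    .setBlocksize(8192)
                    .build();

            TableDescriptor store = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("master", "store"))
                    .setColumnFamily(info)
                    .build();

            System.out.println(store);
        }
    }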
2024-11-13T13:38:25,684 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731505105684Disabling compacts and flushes for region at 1731505105684Disabling writes for close at 1731505105684Writing region close event to WAL at 1731505105684Closed at 1731505105684 2024-11-13T13:38:25,685 WARN [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/MasterData/data/master/store/.initializing 2024-11-13T13:38:25,686 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/MasterData/WALs/bfeb2336aed7,38711,1731505105176 2024-11-13T13:38:25,688 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bfeb2336aed7%2C38711%2C1731505105176, suffix=, logDir=hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/MasterData/WALs/bfeb2336aed7,38711,1731505105176, archiveDir=hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/MasterData/oldWALs, maxLogs=10 2024-11-13T13:38:25,689 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C38711%2C1731505105176.1731505105689 2024-11-13T13:38:25,701 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/MasterData/WALs/bfeb2336aed7,38711,1731505105176/bfeb2336aed7%2C38711%2C1731505105176.1731505105689 2024-11-13T13:38:25,702 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44213:44213),(127.0.0.1/127.0.0.1:44807:44807)] 2024-11-13T13:38:25,705 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T13:38:25,705 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:38:25,705 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:38:25,705 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:38:25,707 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:38:25,708 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T13:38:25,708 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:38:25,709 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:38:25,709 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:38:25,710 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T13:38:25,710 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:38:25,711 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T13:38:25,711 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:38:25,712 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T13:38:25,712 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:38:25,712 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T13:38:25,712 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:38:25,714 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T13:38:25,714 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:38:25,714 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T13:38:25,714 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:38:25,715 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:38:25,715 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:38:25,722 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:38:25,722 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:38:25,723 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T13:38:25,726 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T13:38:25,727 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T13:38:25,728 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=827650, jitterRate=0.05241157114505768}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T13:38:25,728 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731505105705Initializing all the Stores at 1731505105706 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731505105706Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731505105707 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731505105707Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731505105707Cleaning up temporary data from old regions at 1731505105722 (+15 ms)Region opened successfully at 1731505105728 (+6 ms) 2024-11-13T13:38:25,732 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T13:38:25,736 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ce69aa7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bfeb2336aed7/172.17.0.2:0 2024-11-13T13:38:25,737 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-13T13:38:25,737 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T13:38:25,737 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T13:38:25,737 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T13:38:25,738 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-13T13:38:25,738 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-13T13:38:25,738 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T13:38:25,741 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-13T13:38:25,742 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T13:38:25,752 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T13:38:25,753 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T13:38:25,754 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T13:38:25,765 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T13:38:25,765 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T13:38:25,769 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T13:38:25,775 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T13:38:25,776 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T13:38:25,786 DEBUG 
[master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T13:38:25,788 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T13:38:25,796 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T13:38:25,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T13:38:25,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33057-0x101346b24b80001, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T13:38:25,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:38:25,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33057-0x101346b24b80001, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:38:25,808 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=bfeb2336aed7,38711,1731505105176, sessionid=0x101346b24b80000, setting cluster-up flag (Was=false) 2024-11-13T13:38:25,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:38:25,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33057-0x101346b24b80001, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:38:25,860 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T13:38:25,861 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bfeb2336aed7,38711,1731505105176 2024-11-13T13:38:25,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:38:25,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33057-0x101346b24b80001, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:38:25,913 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T13:38:25,916 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bfeb2336aed7,38711,1731505105176 2024-11-13T13:38:25,919 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T13:38:25,923 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T13:38:25,923 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T13:38:25,924 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-13T13:38:25,924 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: bfeb2336aed7,38711,1731505105176 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T13:38:25,926 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/bfeb2336aed7:0, corePoolSize=5, maxPoolSize=5 2024-11-13T13:38:25,926 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/bfeb2336aed7:0, corePoolSize=5, maxPoolSize=5 2024-11-13T13:38:25,926 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=5, maxPoolSize=5 2024-11-13T13:38:25,926 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=5, maxPoolSize=5 2024-11-13T13:38:25,926 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/bfeb2336aed7:0, corePoolSize=10, maxPoolSize=10 2024-11-13T13:38:25,927 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:38:25,927 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=2, maxPoolSize=2 2024-11-13T13:38:25,927 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/bfeb2336aed7:0, corePoolSize=1, 
maxPoolSize=1 2024-11-13T13:38:25,928 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731505135927 2024-11-13T13:38:25,928 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T13:38:25,928 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T13:38:25,928 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T13:38:25,928 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T13:38:25,928 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T13:38:25,928 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T13:38:25,928 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T13:38:25,928 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T13:38:25,928 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T13:38:25,928 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T13:38:25,928 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T13:38:25,928 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T13:38:25,929 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T13:38:25,929 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T13:38:25,929 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.large.0-1731505105929,5,FailOnTimeoutGroup] 2024-11-13T13:38:25,929 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.small.0-1731505105929,5,FailOnTimeoutGroup] 2024-11-13T13:38:25,929 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
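Annotation: the entries above show the master assembling its log and HFile cleaner chains (TimeToLiveLogCleaner, ReplicationLogCleaner, HFileLinkCleaner, SnapshotHFileCleaner, and so on) and scheduling the LogsCleaner/HFileCleaner chores at a 600000 ms period. Below is a minimal sketch of how such a chain is normally selected through configuration; it assumes the usual hbase.master.logcleaner.plugins / hbase.master.hfilecleaner.plugins / hbase.master.cleaner.interval keys, which should be checked against the HBase version actually under test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CleanerConfigSketch {
      public static void main(String[] args) {
        // Loads hbase-default.xml / hbase-site.xml from the classpath.
        Configuration conf = HBaseConfiguration.create();

        // Comma-separated cleaner delegates, matching classes logged by CleanerChore(192) above.
        conf.set("hbase.master.logcleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
                + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner");
        conf.set("hbase.master.hfilecleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner,"
                + "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");

        // Cleaner chore period in milliseconds; 600000 matches the LogsCleaner/HFileCleaner entries.
        conf.setInt("hbase.master.cleaner.interval", 600000);

        System.out.println(conf.get("hbase.master.logcleaner.plugins"));
      }
    }
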
2024-11-13T13:38:25,929 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:38:25,929 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-13T13:38:25,929 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T13:38:25,929 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-13T13:38:25,929 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T13:38:25,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37879 is added to blk_1073741831_1007 (size=1321) 2024-11-13T13:38:25,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741831_1007 (size=1321) 2024-11-13T13:38:25,936 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T13:38:25,936 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS 
=> 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416 2024-11-13T13:38:25,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37879 is added to blk_1073741832_1008 (size=32) 2024-11-13T13:38:25,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741832_1008 (size=32) 2024-11-13T13:38:25,942 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:38:25,944 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T13:38:25,945 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T13:38:25,945 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:38:25,946 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:38:25,946 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T13:38:25,947 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T13:38:25,947 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:38:25,948 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:38:25,948 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T13:38:25,949 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T13:38:25,949 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:38:25,950 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:38:25,950 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T13:38:25,951 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T13:38:25,951 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:38:25,952 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:38:25,952 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T13:38:25,953 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/data/hbase/meta/1588230740 2024-11-13T13:38:25,954 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/data/hbase/meta/1588230740 2024-11-13T13:38:25,956 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T13:38:25,956 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T13:38:25,956 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
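Annotation: the FlushLargeStoresPolicy entries here and for the master:store region earlier describe a fallback: when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the table, the per-family lower bound becomes the region's memstore flush size divided by the number of column families. A small worked sketch of that arithmetic, using only figures visible in the log (both regions have 4 families; the 16 MB and 32 MB results imply 64 MB and 128 MB flush sizes respectively); the helper name is illustrative, not HBase API.

    public class FlushLowerBoundSketch {
      // Fallback described by FlushLargeStoresPolicy(65): flushSize / numberOfFamilies.
      static long perFamilyLowerBound(long memStoreFlushSizeBytes, int familyCount) {
        return memStoreFlushSizeBytes / familyCount;
      }

      public static void main(String[] args) {
        // hbase:meta has 4 families (info, ns, rep_barrier, table):
        // 64 MB / 4 = 16 MB, matching flushSizeLowerBound=16777216 in the log.
        System.out.println(perFamilyLowerBound(64L * 1024 * 1024, 4));

        // master:store has 4 families (info, proc, rs, state):
        // 128 MB / 4 = 32 MB, matching flushSizeLowerBound=33554432 logged earlier.
        System.out.println(perFamilyLowerBound(128L * 1024 * 1024, 4));
      }
    }
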
2024-11-13T13:38:25,958 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T13:38:25,961 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T13:38:25,961 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=808738, jitterRate=0.02836468815803528}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T13:38:25,962 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731505105942Initializing all the Stores at 1731505105943 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731505105943Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731505105943Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731505105943Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731505105943Cleaning up temporary data from old regions at 1731505105956 (+13 ms)Region opened successfully at 1731505105962 (+6 ms) 2024-11-13T13:38:25,962 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T13:38:25,963 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T13:38:25,963 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T13:38:25,963 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T13:38:25,963 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T13:38:25,963 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T13:38:25,963 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731505105962Disabling compacts and flushes for region at 1731505105962Disabling writes for close at 1731505105963 (+1 ms)Writing 
region close event to WAL at 1731505105963Closed at 1731505105963 2024-11-13T13:38:25,965 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T13:38:25,965 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T13:38:25,965 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T13:38:25,967 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T13:38:25,968 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T13:38:25,999 INFO [RS:0;bfeb2336aed7:33057 {}] regionserver.HRegionServer(746): ClusterId : a4df24fa-1db4-41f2-b6e9-ecdd955782ee 2024-11-13T13:38:25,999 DEBUG [RS:0;bfeb2336aed7:33057 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T13:38:26,009 DEBUG [RS:0;bfeb2336aed7:33057 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T13:38:26,009 DEBUG [RS:0;bfeb2336aed7:33057 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T13:38:26,021 DEBUG [RS:0;bfeb2336aed7:33057 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T13:38:26,021 DEBUG [RS:0;bfeb2336aed7:33057 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f1bc249, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bfeb2336aed7/172.17.0.2:0 2024-11-13T13:38:26,039 DEBUG [RS:0;bfeb2336aed7:33057 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;bfeb2336aed7:33057 2024-11-13T13:38:26,039 INFO [RS:0;bfeb2336aed7:33057 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T13:38:26,039 INFO [RS:0;bfeb2336aed7:33057 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T13:38:26,039 DEBUG [RS:0;bfeb2336aed7:33057 {}] regionserver.HRegionServer(832): About to register with Master. 
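Annotation: the ZKWatcher/ZKUtil traffic in this stretch revolves around a handful of znodes under the /hbase base node (/hbase/running, /hbase/rs, /hbase/meta-region-server, the switch and balancer nodes). A minimal read-only sketch of how those paths could be inspected from outside the test, assuming the plain Apache ZooKeeper client and the quorum address shown in the log (127.0.0.1:58622); it is not part of the test itself.

    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZNodeInspectSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Quorum taken from the log lines above; session timeout is arbitrary.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:58622", 30000, event -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        });
        connected.await();

        // /hbase/running is created once the master declares the cluster up.
        System.out.println("cluster up: " + (zk.exists("/hbase/running", false) != null));

        // Each live region server registers an ephemeral child under /hbase/rs.
        List<String> servers = zk.getChildren("/hbase/rs", false);
        System.out.println("region servers: " + servers);

        zk.close();
      }
    }
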
2024-11-13T13:38:26,040 INFO [RS:0;bfeb2336aed7:33057 {}] regionserver.HRegionServer(2659): reportForDuty to master=bfeb2336aed7,38711,1731505105176 with port=33057, startcode=1731505105466 2024-11-13T13:38:26,040 DEBUG [RS:0;bfeb2336aed7:33057 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T13:38:26,042 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59653, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T13:38:26,042 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38711 {}] master.ServerManager(363): Checking decommissioned status of RegionServer bfeb2336aed7,33057,1731505105466 2024-11-13T13:38:26,042 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38711 {}] master.ServerManager(517): Registering regionserver=bfeb2336aed7,33057,1731505105466 2024-11-13T13:38:26,043 DEBUG [RS:0;bfeb2336aed7:33057 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416 2024-11-13T13:38:26,043 DEBUG [RS:0;bfeb2336aed7:33057 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35503 2024-11-13T13:38:26,043 DEBUG [RS:0;bfeb2336aed7:33057 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T13:38:26,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T13:38:26,049 DEBUG [RS:0;bfeb2336aed7:33057 {}] zookeeper.ZKUtil(111): regionserver:33057-0x101346b24b80001, quorum=127.0.0.1:58622, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bfeb2336aed7,33057,1731505105466 2024-11-13T13:38:26,050 WARN [RS:0;bfeb2336aed7:33057 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T13:38:26,050 INFO [RS:0;bfeb2336aed7:33057 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T13:38:26,050 DEBUG [RS:0;bfeb2336aed7:33057 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/WALs/bfeb2336aed7,33057,1731505105466 2024-11-13T13:38:26,050 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bfeb2336aed7,33057,1731505105466] 2024-11-13T13:38:26,052 INFO [RS:0;bfeb2336aed7:33057 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T13:38:26,053 INFO [RS:0;bfeb2336aed7:33057 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T13:38:26,054 INFO [RS:0;bfeb2336aed7:33057 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T13:38:26,054 INFO [RS:0;bfeb2336aed7:33057 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
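Annotation: the region server entries just above pick the FSHLogProvider WAL implementation, size the global memstore limit (880 M with an 836 M low mark) and bound compaction throughput between 50 and 100 MB/s. A hedged sketch of the configuration keys that usually drive those choices; the key names are to the best of my knowledge and should be verified against the HBase version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RegionServerTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // "filesystem" selects FSHLogProvider, as instantiated by WALFactory(196) above;
        // "asyncfs" would select the async WAL provider instead.
        conf.set("hbase.wal.provider", "filesystem");

        // Fraction of the heap usable by all memstores; the 880 M / 836 M figures in the
        // log are derived from this fraction and the test JVM heap size.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);

        // Bounds used by PressureAwareCompactionThroughputController (bytes per second).
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
      }
    }
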
2024-11-13T13:38:26,054 INFO [RS:0;bfeb2336aed7:33057 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T13:38:26,055 INFO [RS:0;bfeb2336aed7:33057 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T13:38:26,055 INFO [RS:0;bfeb2336aed7:33057 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T13:38:26,055 DEBUG [RS:0;bfeb2336aed7:33057 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:38:26,055 DEBUG [RS:0;bfeb2336aed7:33057 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:38:26,055 DEBUG [RS:0;bfeb2336aed7:33057 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:38:26,055 DEBUG [RS:0;bfeb2336aed7:33057 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:38:26,055 DEBUG [RS:0;bfeb2336aed7:33057 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:38:26,055 DEBUG [RS:0;bfeb2336aed7:33057 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bfeb2336aed7:0, corePoolSize=2, maxPoolSize=2 2024-11-13T13:38:26,055 DEBUG [RS:0;bfeb2336aed7:33057 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:38:26,055 DEBUG [RS:0;bfeb2336aed7:33057 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:38:26,055 DEBUG [RS:0;bfeb2336aed7:33057 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:38:26,055 DEBUG [RS:0;bfeb2336aed7:33057 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:38:26,055 DEBUG [RS:0;bfeb2336aed7:33057 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:38:26,055 DEBUG [RS:0;bfeb2336aed7:33057 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bfeb2336aed7:0, corePoolSize=1, maxPoolSize=1 2024-11-13T13:38:26,055 DEBUG [RS:0;bfeb2336aed7:33057 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bfeb2336aed7:0, corePoolSize=3, maxPoolSize=3 2024-11-13T13:38:26,055 DEBUG [RS:0;bfeb2336aed7:33057 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bfeb2336aed7:0, corePoolSize=3, maxPoolSize=3 2024-11-13T13:38:26,056 INFO [RS:0;bfeb2336aed7:33057 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
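Annotation: the repeated "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." lines come from ChoreService scheduling periodic tasks (compaction checks, memstore flush checks, cleaners). A minimal sketch, assuming the public ChoreService/ScheduledChore/Stoppable API, of what such a chore looks like; the chore below is a stand-in, not one of the chores in the log.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };

        // Prefix names the chore service threads, like "master/..." in the log.
        ChoreService service = new ChoreService("demo");

        // Runs chore() once per second, similar to the CompactionChecker period above.
        ScheduledChore heartbeat = new ScheduledChore("DemoChore", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("chore tick");
          }
        };
        service.scheduleChore(heartbeat);

        Thread.sleep(3000);
        stopper.stop("done");
        service.shutdown();
      }
    }
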
2024-11-13T13:38:26,056 INFO [RS:0;bfeb2336aed7:33057 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T13:38:26,056 INFO [RS:0;bfeb2336aed7:33057 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T13:38:26,056 INFO [RS:0;bfeb2336aed7:33057 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T13:38:26,056 INFO [RS:0;bfeb2336aed7:33057 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T13:38:26,056 INFO [RS:0;bfeb2336aed7:33057 {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,33057,1731505105466-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T13:38:26,073 INFO [RS:0;bfeb2336aed7:33057 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T13:38:26,073 INFO [RS:0;bfeb2336aed7:33057 {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,33057,1731505105466-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T13:38:26,073 INFO [RS:0;bfeb2336aed7:33057 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:38:26,073 INFO [RS:0;bfeb2336aed7:33057 {}] regionserver.Replication(171): bfeb2336aed7,33057,1731505105466 started 2024-11-13T13:38:26,084 INFO [RS:0;bfeb2336aed7:33057 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:38:26,084 INFO [RS:0;bfeb2336aed7:33057 {}] regionserver.HRegionServer(1482): Serving as bfeb2336aed7,33057,1731505105466, RpcServer on bfeb2336aed7/172.17.0.2:33057, sessionid=0x101346b24b80001 2024-11-13T13:38:26,085 DEBUG [RS:0;bfeb2336aed7:33057 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T13:38:26,085 DEBUG [RS:0;bfeb2336aed7:33057 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bfeb2336aed7,33057,1731505105466 2024-11-13T13:38:26,085 DEBUG [RS:0;bfeb2336aed7:33057 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bfeb2336aed7,33057,1731505105466' 2024-11-13T13:38:26,085 DEBUG [RS:0;bfeb2336aed7:33057 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T13:38:26,085 DEBUG [RS:0;bfeb2336aed7:33057 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T13:38:26,086 DEBUG [RS:0;bfeb2336aed7:33057 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T13:38:26,086 DEBUG [RS:0;bfeb2336aed7:33057 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T13:38:26,086 DEBUG [RS:0;bfeb2336aed7:33057 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager bfeb2336aed7,33057,1731505105466 2024-11-13T13:38:26,086 DEBUG [RS:0;bfeb2336aed7:33057 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bfeb2336aed7,33057,1731505105466' 2024-11-13T13:38:26,086 DEBUG [RS:0;bfeb2336aed7:33057 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T13:38:26,086 DEBUG 
[RS:0;bfeb2336aed7:33057 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T13:38:26,087 DEBUG [RS:0;bfeb2336aed7:33057 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T13:38:26,087 INFO [RS:0;bfeb2336aed7:33057 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T13:38:26,087 INFO [RS:0;bfeb2336aed7:33057 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-13T13:38:26,119 WARN [bfeb2336aed7:38711 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-13T13:38:26,191 INFO [RS:0;bfeb2336aed7:33057 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bfeb2336aed7%2C33057%2C1731505105466, suffix=, logDir=hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/WALs/bfeb2336aed7,33057,1731505105466, archiveDir=hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/oldWALs, maxLogs=32 2024-11-13T13:38:26,192 INFO [RS:0;bfeb2336aed7:33057 {}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C33057%2C1731505105466.1731505106191 2024-11-13T13:38:26,200 INFO [RS:0;bfeb2336aed7:33057 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/WALs/bfeb2336aed7,33057,1731505105466/bfeb2336aed7%2C33057%2C1731505105466.1731505106191 2024-11-13T13:38:26,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T13:38:26,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:38:26,201 DEBUG [RS:0;bfeb2336aed7:33057 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44213:44213),(127.0.0.1/127.0.0.1:44807:44807)] 2024-11-13T13:38:26,369 DEBUG [bfeb2336aed7:38711 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-13T13:38:26,369 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=bfeb2336aed7,33057,1731505105466 2024-11-13T13:38:26,370 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bfeb2336aed7,33057,1731505105466, state=OPENING 2024-11-13T13:38:26,439 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T13:38:26,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:38:26,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33057-0x101346b24b80001, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:38:26,450 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T13:38:26,450 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T13:38:26,450 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T13:38:26,450 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=bfeb2336aed7,33057,1731505105466}] 2024-11-13T13:38:26,603 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T13:38:26,605 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55539, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T13:38:26,609 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T13:38:26,609 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T13:38:26,611 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bfeb2336aed7%2C33057%2C1731505105466.meta, suffix=.meta, logDir=hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/WALs/bfeb2336aed7,33057,1731505105466, archiveDir=hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/oldWALs, maxLogs=32 2024-11-13T13:38:26,611 INFO 
[RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor bfeb2336aed7%2C33057%2C1731505105466.meta.1731505106611.meta 2024-11-13T13:38:26,622 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/WALs/bfeb2336aed7,33057,1731505105466/bfeb2336aed7%2C33057%2C1731505105466.meta.1731505106611.meta 2024-11-13T13:38:26,631 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44807:44807),(127.0.0.1/127.0.0.1:44213:44213)] 2024-11-13T13:38:26,636 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T13:38:26,637 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T13:38:26,637 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T13:38:26,637 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-13T13:38:26,637 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T13:38:26,637 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T13:38:26,637 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T13:38:26,637 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T13:38:26,639 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T13:38:26,640 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T13:38:26,640 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:38:26,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:38:26,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T13:38:26,642 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T13:38:26,642 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:38:26,642 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:38:26,642 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T13:38:26,643 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T13:38:26,643 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:38:26,643 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:38:26,643 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T13:38:26,644 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T13:38:26,644 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T13:38:26,644 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T13:38:26,645 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T13:38:26,645 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/data/hbase/meta/1588230740 2024-11-13T13:38:26,646 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/data/hbase/meta/1588230740 2024-11-13T13:38:26,647 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T13:38:26,647 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T13:38:26,648 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
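Annotation: the column-family maps repeated throughout these entries ({NAME => 'info', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', BLOCKSIZE => '8192 B (8KB)', ...}) are the string form of table and column-family descriptors. A hedged sketch of how an equivalent family would be declared with the client-side builder API, applied to a hypothetical user table rather than hbase:meta.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static void main(String[] args) {
        // Mirrors the 'info' family attributes printed in the log above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8192)
            .build();

        // "demo_table" is a hypothetical table, not one used by this test.
        TableDescriptor table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo_table"))
            .setColumnFamily(info)
            .build();

        System.out.println(table);
      }
    }
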
2024-11-13T13:38:26,649 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T13:38:26,649 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=740232, jitterRate=-0.0587468147277832}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T13:38:26,649 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T13:38:26,650 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731505106638Writing region info on filesystem at 1731505106638Initializing all the Stores at 1731505106639 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731505106639Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731505106639Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731505106639Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731505106639Cleaning up temporary data from old regions at 1731505106647 (+8 ms)Running coprocessor post-open hooks at 1731505106649 (+2 ms)Region opened successfully at 1731505106650 (+1 ms) 2024-11-13T13:38:26,650 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731505106602 2024-11-13T13:38:26,652 DEBUG [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T13:38:26,653 INFO [RS_OPEN_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T13:38:26,653 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=bfeb2336aed7,33057,1731505105466 2024-11-13T13:38:26,654 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bfeb2336aed7,33057,1731505105466, state=OPEN 2024-11-13T13:38:26,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33057-0x101346b24b80001, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T13:38:26,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T13:38:26,703 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=bfeb2336aed7,33057,1731505105466 2024-11-13T13:38:26,703 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T13:38:26,703 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T13:38:26,705 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T13:38:26,705 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=bfeb2336aed7,33057,1731505105466 in 253 msec 2024-11-13T13:38:26,707 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-13T13:38:26,707 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 740 msec 2024-11-13T13:38:26,708 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T13:38:26,708 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T13:38:26,709 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T13:38:26,709 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bfeb2336aed7,33057,1731505105466, seqNum=-1] 2024-11-13T13:38:26,710 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T13:38:26,711 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34923, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T13:38:26,716 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 794 msec 2024-11-13T13:38:26,716 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731505106716, completionTime=-1 2024-11-13T13:38:26,716 INFO 
[master/bfeb2336aed7:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-13T13:38:26,716 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-13T13:38:26,718 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-13T13:38:26,718 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731505166718 2024-11-13T13:38:26,718 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731505226718 2024-11-13T13:38:26,718 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-13T13:38:26,719 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,38711,1731505105176-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T13:38:26,719 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,38711,1731505105176-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:38:26,719 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,38711,1731505105176-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:38:26,719 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-bfeb2336aed7:38711, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:38:26,719 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-13T13:38:26,719 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T13:38:26,721 DEBUG [master/bfeb2336aed7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T13:38:26,723 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.179sec 2024-11-13T13:38:26,723 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T13:38:26,723 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T13:38:26,723 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-13T13:38:26,723 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-13T13:38:26,723 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T13:38:26,723 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,38711,1731505105176-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T13:38:26,723 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,38711,1731505105176-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T13:38:26,725 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T13:38:26,725 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T13:38:26,725 INFO [master/bfeb2336aed7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bfeb2336aed7,38711,1731505105176-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T13:38:26,799 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d971f20, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T13:38:26,799 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request bfeb2336aed7,38711,-1 for getting cluster id 2024-11-13T13:38:26,799 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T13:38:26,800 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a4df24fa-1db4-41f2-b6e9-ecdd955782ee' 2024-11-13T13:38:26,800 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T13:38:26,800 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a4df24fa-1db4-41f2-b6e9-ecdd955782ee" 2024-11-13T13:38:26,801 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73fc411, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T13:38:26,801 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [bfeb2336aed7,38711,-1] 2024-11-13T13:38:26,801 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T13:38:26,801 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:38:26,802 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52550, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T13:38:26,803 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@591c2c01, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T13:38:26,803 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T13:38:26,804 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=bfeb2336aed7,33057,1731505105466, seqNum=-1] 2024-11-13T13:38:26,804 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T13:38:26,805 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59006, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T13:38:26,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=bfeb2336aed7,38711,1731505105176 2024-11-13T13:38:26,807 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T13:38:26,810 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-13T13:38:26,810 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T13:38:26,813 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/WALs/test.com,8080,1, archiveDir=hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/oldWALs, maxLogs=32 2024-11-13T13:38:26,813 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731505106813 2024-11-13T13:38:26,819 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/WALs/test.com,8080,1/test.com%2C8080%2C1.1731505106813 2024-11-13T13:38:26,820 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44807:44807),(127.0.0.1/127.0.0.1:44213:44213)] 2024-11-13T13:38:26,821 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731505106821 2024-11-13T13:38:26,826 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:26,826 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:26,826 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:26,826 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:26,826 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:26,826 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/WALs/test.com,8080,1/test.com%2C8080%2C1.1731505106813 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/WALs/test.com,8080,1/test.com%2C8080%2C1.1731505106821 2024-11-13T13:38:26,827 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44213:44213),(127.0.0.1/127.0.0.1:44807:44807)] 2024-11-13T13:38:26,827 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/WALs/test.com,8080,1/test.com%2C8080%2C1.1731505106813 is not closed yet, will try archiving it next time 2024-11-13T13:38:26,828 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:26,828 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:26,828 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:26,828 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:26,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741835_1011 (size=93) 2024-11-13T13:38:26,828 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:26,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37879 is added to blk_1073741835_1011 (size=93) 2024-11-13T13:38:26,829 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/WALs/test.com,8080,1/test.com%2C8080%2C1.1731505106813 to hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/oldWALs/test.com%2C8080%2C1.1731505106813 2024-11-13T13:38:26,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37879 is added to blk_1073741836_1012 (size=93) 2024-11-13T13:38:26,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741836_1012 (size=93) 2024-11-13T13:38:26,833 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/oldWALs 2024-11-13T13:38:26,833 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731505106821) 2024-11-13T13:38:26,833 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T13:38:26,833 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-13T13:38:26,833 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T13:38:26,833 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:38:26,833 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:38:26,833 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-13T13:38:26,833 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-13T13:38:26,833 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1464142045, stopped=false 2024-11-13T13:38:26,834 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=bfeb2336aed7,38711,1731505105176 2024-11-13T13:38:26,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33057-0x101346b24b80001, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T13:38:26,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T13:38:26,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33057-0x101346b24b80001, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:38:26,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:38:26,858 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T13:38:26,858 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-13T13:38:26,858 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T13:38:26,858 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:38:26,858 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T13:38:26,858 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'bfeb2336aed7,33057,1731505105466' ***** 2024-11-13T13:38:26,859 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T13:38:26,859 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33057-0x101346b24b80001, quorum=127.0.0.1:58622, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T13:38:26,859 INFO [RS:0;bfeb2336aed7:33057 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T13:38:26,859 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T13:38:26,859 INFO [RS:0;bfeb2336aed7:33057 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T13:38:26,859 INFO [RS:0;bfeb2336aed7:33057 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T13:38:26,859 INFO [RS:0;bfeb2336aed7:33057 {}] regionserver.HRegionServer(959): stopping server bfeb2336aed7,33057,1731505105466 2024-11-13T13:38:26,859 INFO [RS:0;bfeb2336aed7:33057 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T13:38:26,859 INFO [RS:0;bfeb2336aed7:33057 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;bfeb2336aed7:33057. 
2024-11-13T13:38:26,859 DEBUG [RS:0;bfeb2336aed7:33057 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T13:38:26,859 DEBUG [RS:0;bfeb2336aed7:33057 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:38:26,859 INFO [RS:0;bfeb2336aed7:33057 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T13:38:26,859 INFO [RS:0;bfeb2336aed7:33057 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T13:38:26,859 INFO [RS:0;bfeb2336aed7:33057 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-13T13:38:26,860 INFO [RS:0;bfeb2336aed7:33057 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T13:38:26,860 INFO [RS:0;bfeb2336aed7:33057 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-13T13:38:26,860 DEBUG [RS:0;bfeb2336aed7:33057 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-13T13:38:26,860 DEBUG [RS:0;bfeb2336aed7:33057 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-13T13:38:26,860 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T13:38:26,860 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T13:38:26,860 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T13:38:26,860 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T13:38:26,860 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T13:38:26,860 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-13T13:38:26,877 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/data/hbase/meta/1588230740/.tmp/ns/ff3325e938c54a93821e793a715431db is 43, key is default/ns:d/1731505106711/Put/seqid=0 2024-11-13T13:38:26,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37879 is added to blk_1073741837_1013 (size=5153) 2024-11-13T13:38:26,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741837_1013 (size=5153) 2024-11-13T13:38:26,882 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/data/hbase/meta/1588230740/.tmp/ns/ff3325e938c54a93821e793a715431db 2024-11-13T13:38:26,886 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/data/hbase/meta/1588230740/.tmp/ns/ff3325e938c54a93821e793a715431db as hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/data/hbase/meta/1588230740/ns/ff3325e938c54a93821e793a715431db 2024-11-13T13:38:26,890 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/data/hbase/meta/1588230740/ns/ff3325e938c54a93821e793a715431db, entries=2, sequenceid=6, filesize=5.0 K 2024-11-13T13:38:26,891 INFO 
[RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 31ms, sequenceid=6, compaction requested=false 2024-11-13T13:38:26,891 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-13T13:38:26,895 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T13:38:26,895 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T13:38:26,895 INFO [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T13:38:26,895 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731505106860Running coprocessor pre-close hooks at 1731505106860Disabling compacts and flushes for region at 1731505106860Disabling writes for close at 1731505106860Obtaining lock to block concurrent updates at 1731505106860Preparing flush snapshotting stores in 1588230740 at 1731505106860Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731505106861 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731505106862 (+1 ms)Flushing 1588230740/ns: creating writer at 1731505106862Flushing 1588230740/ns: appending metadata at 1731505106877 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731505106877Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@62df799e: reopening flushed file at 1731505106886 (+9 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 31ms, sequenceid=6, compaction requested=false at 1731505106891 (+5 ms)Writing region close event to WAL at 1731505106892 (+1 ms)Running coprocessor post-close hooks at 1731505106895 (+3 ms)Closed at 1731505106895 2024-11-13T13:38:26,896 DEBUG [RS_CLOSE_META-regionserver/bfeb2336aed7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T13:38:26,928 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T13:38:26,929 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:26,930 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:26,930 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:26,930 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:26,931 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:26,931 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:26,932 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:26,933 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:26,933 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:26,934 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:26,967 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:26,967 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:26,967 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:26,968 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:26,968 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:26,968 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:26,974 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:26,974 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:26,975 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:26,979 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T13:38:27,060 INFO [RS:0;bfeb2336aed7:33057 {}] regionserver.HRegionServer(976): stopping server bfeb2336aed7,33057,1731505105466; all regions closed. 2024-11-13T13:38:27,060 INFO [regionserver/bfeb2336aed7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-13T13:38:27,060 INFO [regionserver/bfeb2336aed7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-13T13:38:27,061 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:27,061 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:27,061 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:27,061 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:27,061 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:27,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741834_1010 (size=1152) 2024-11-13T13:38:27,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37879 is added to blk_1073741834_1010 (size=1152) 2024-11-13T13:38:27,065 DEBUG [RS:0;bfeb2336aed7:33057 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/oldWALs 2024-11-13T13:38:27,065 INFO [RS:0;bfeb2336aed7:33057 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bfeb2336aed7%2C33057%2C1731505105466.meta:.meta(num 1731505106611) 2024-11-13T13:38:27,065 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:27,065 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:27,065 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:27,065 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:27,065 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:27,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37879 is added to blk_1073741833_1009 (size=93) 2024-11-13T13:38:27,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741833_1009 (size=93) 2024-11-13T13:38:27,069 DEBUG [RS:0;bfeb2336aed7:33057 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/oldWALs 2024-11-13T13:38:27,069 INFO [RS:0;bfeb2336aed7:33057 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog bfeb2336aed7%2C33057%2C1731505105466:(num 1731505106191) 2024-11-13T13:38:27,069 DEBUG [RS:0;bfeb2336aed7:33057 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T13:38:27,069 INFO [RS:0;bfeb2336aed7:33057 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T13:38:27,069 INFO [RS:0;bfeb2336aed7:33057 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T13:38:27,069 INFO [RS:0;bfeb2336aed7:33057 {}] hbase.ChoreService(370): Chore service for: regionserver/bfeb2336aed7:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-13T13:38:27,069 INFO [RS:0;bfeb2336aed7:33057 {}] hbase.HBaseServerBase(448): Shutdown executor service 
2024-11-13T13:38:27,069 INFO [regionserver/bfeb2336aed7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T13:38:27,069 INFO [RS:0;bfeb2336aed7:33057 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33057 2024-11-13T13:38:27,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33057-0x101346b24b80001, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bfeb2336aed7,33057,1731505105466 2024-11-13T13:38:27,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T13:38:27,081 INFO [RS:0;bfeb2336aed7:33057 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T13:38:27,091 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bfeb2336aed7,33057,1731505105466] 2024-11-13T13:38:27,102 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/bfeb2336aed7,33057,1731505105466 already deleted, retry=false 2024-11-13T13:38:27,102 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; bfeb2336aed7,33057,1731505105466 expired; onlineServers=0 2024-11-13T13:38:27,102 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'bfeb2336aed7,38711,1731505105176' ***** 2024-11-13T13:38:27,102 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-13T13:38:27,102 INFO [M:0;bfeb2336aed7:38711 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T13:38:27,102 INFO [M:0;bfeb2336aed7:38711 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T13:38:27,102 DEBUG [M:0;bfeb2336aed7:38711 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-13T13:38:27,102 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-13T13:38:27,102 DEBUG [M:0;bfeb2336aed7:38711 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T13:38:27,102 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.large.0-1731505105929 {}] cleaner.HFileCleaner(306): Exit Thread[master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.large.0-1731505105929,5,FailOnTimeoutGroup] 2024-11-13T13:38:27,102 DEBUG [master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.small.0-1731505105929 {}] cleaner.HFileCleaner(306): Exit Thread[master/bfeb2336aed7:0:becomeActiveMaster-HFileCleaner.small.0-1731505105929,5,FailOnTimeoutGroup] 2024-11-13T13:38:27,102 INFO [M:0;bfeb2336aed7:38711 {}] hbase.ChoreService(370): Chore service for: master/bfeb2336aed7:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T13:38:27,102 INFO [M:0;bfeb2336aed7:38711 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T13:38:27,102 DEBUG [M:0;bfeb2336aed7:38711 {}] master.HMaster(1795): Stopping service threads 2024-11-13T13:38:27,102 INFO [M:0;bfeb2336aed7:38711 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T13:38:27,102 INFO [M:0;bfeb2336aed7:38711 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T13:38:27,103 INFO [M:0;bfeb2336aed7:38711 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T13:38:27,103 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-13T13:38:27,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T13:38:27,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T13:38:27,123 DEBUG [M:0;bfeb2336aed7:38711 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-13T13:38:27,123 DEBUG [M:0;bfeb2336aed7:38711 {}] master.ActiveMasterManager(353): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-13T13:38:27,123 INFO [M:0;bfeb2336aed7:38711 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/.lastflushedseqids 2024-11-13T13:38:27,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37879 is added to blk_1073741838_1014 (size=99) 2024-11-13T13:38:27,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741838_1014 (size=99) 2024-11-13T13:38:27,128 INFO [M:0;bfeb2336aed7:38711 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T13:38:27,128 INFO [M:0;bfeb2336aed7:38711 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T13:38:27,129 DEBUG [M:0;bfeb2336aed7:38711 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 
2024-11-13T13:38:27,129 INFO [M:0;bfeb2336aed7:38711 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:38:27,129 DEBUG [M:0;bfeb2336aed7:38711 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:38:27,129 DEBUG [M:0;bfeb2336aed7:38711 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T13:38:27,129 DEBUG [M:0;bfeb2336aed7:38711 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:38:27,129 INFO [M:0;bfeb2336aed7:38711 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-13T13:38:27,149 DEBUG [M:0;bfeb2336aed7:38711 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/64dc79d8924a4a05b0d5922eb51a3672 is 82, key is hbase:meta,,1/info:regioninfo/1731505106653/Put/seqid=0 2024-11-13T13:38:27,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741839_1015 (size=5672) 2024-11-13T13:38:27,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37879 is added to blk_1073741839_1015 (size=5672) 2024-11-13T13:38:27,154 INFO [M:0;bfeb2336aed7:38711 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/64dc79d8924a4a05b0d5922eb51a3672 2024-11-13T13:38:27,171 DEBUG [M:0;bfeb2336aed7:38711 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0ced9b8678fe4bea9fb50f9f23c2fab2 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731505106715/Put/seqid=0 2024-11-13T13:38:27,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37879 is added to blk_1073741840_1016 (size=5275) 2024-11-13T13:38:27,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741840_1016 (size=5275) 2024-11-13T13:38:27,175 INFO [M:0;bfeb2336aed7:38711 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0ced9b8678fe4bea9fb50f9f23c2fab2 2024-11-13T13:38:27,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33057-0x101346b24b80001, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T13:38:27,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33057-0x101346b24b80001, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T13:38:27,192 INFO [RS:0;bfeb2336aed7:33057 {}] 
hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T13:38:27,192 INFO [RS:0;bfeb2336aed7:33057 {}] regionserver.HRegionServer(1031): Exiting; stopping=bfeb2336aed7,33057,1731505105466; zookeeper connection closed. 2024-11-13T13:38:27,192 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@529a3165 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@529a3165 2024-11-13T13:38:27,192 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-13T13:38:27,196 DEBUG [M:0;bfeb2336aed7:38711 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b8d39c51cb4a4f22a8a1c14f196b9d1d is 69, key is bfeb2336aed7,33057,1731505105466/rs:state/1731505106042/Put/seqid=0 2024-11-13T13:38:27,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,38781,1731504892507/bfeb2336aed7%2C38781%2C1731504892507.1731504892747 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:38:27,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40141/user/jenkins/test-data/2f1d1f3b-cd32-992f-e1e0-0e77f6d7e239/WALs/bfeb2336aed7,33523,1731504891245/bfeb2336aed7%2C33523%2C1731504891245.meta.1731504892258.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T13:38:27,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37879 is added to blk_1073741841_1017 (size=5156) 2024-11-13T13:38:27,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741841_1017 (size=5156) 2024-11-13T13:38:27,205 INFO [M:0;bfeb2336aed7:38711 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b8d39c51cb4a4f22a8a1c14f196b9d1d 2024-11-13T13:38:27,222 DEBUG [M:0;bfeb2336aed7:38711 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/19ae6b951b864687b0b22541e6e01f16 is 52, key is load_balancer_on/state:d/1731505106809/Put/seqid=0 2024-11-13T13:38:27,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741842_1018 (size=5056) 2024-11-13T13:38:27,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37879 is added to blk_1073741842_1018 (size=5056) 2024-11-13T13:38:27,229 INFO [M:0;bfeb2336aed7:38711 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/19ae6b951b864687b0b22541e6e01f16 2024-11-13T13:38:27,233 DEBUG [M:0;bfeb2336aed7:38711 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/64dc79d8924a4a05b0d5922eb51a3672 as hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/64dc79d8924a4a05b0d5922eb51a3672 2024-11-13T13:38:27,237 INFO [M:0;bfeb2336aed7:38711 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/64dc79d8924a4a05b0d5922eb51a3672, entries=8, sequenceid=29, filesize=5.5 K 2024-11-13T13:38:27,238 DEBUG [M:0;bfeb2336aed7:38711 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0ced9b8678fe4bea9fb50f9f23c2fab2 as hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0ced9b8678fe4bea9fb50f9f23c2fab2 2024-11-13T13:38:27,244 INFO [M:0;bfeb2336aed7:38711 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0ced9b8678fe4bea9fb50f9f23c2fab2, entries=3, sequenceid=29, filesize=5.2 K 2024-11-13T13:38:27,245 DEBUG [M:0;bfeb2336aed7:38711 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b8d39c51cb4a4f22a8a1c14f196b9d1d as hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b8d39c51cb4a4f22a8a1c14f196b9d1d 2024-11-13T13:38:27,251 INFO [M:0;bfeb2336aed7:38711 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b8d39c51cb4a4f22a8a1c14f196b9d1d, entries=1, sequenceid=29, filesize=5.0 K 2024-11-13T13:38:27,252 DEBUG [M:0;bfeb2336aed7:38711 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/19ae6b951b864687b0b22541e6e01f16 as hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/19ae6b951b864687b0b22541e6e01f16 2024-11-13T13:38:27,257 INFO [M:0;bfeb2336aed7:38711 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35503/user/jenkins/test-data/d11e75e3-2f35-680e-6fac-df99cc3f5416/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/19ae6b951b864687b0b22541e6e01f16, entries=1, sequenceid=29, filesize=4.9 K 2024-11-13T13:38:27,258 INFO [M:0;bfeb2336aed7:38711 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=29, compaction requested=false 2024-11-13T13:38:27,260 INFO [M:0;bfeb2336aed7:38711 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T13:38:27,260 DEBUG [M:0;bfeb2336aed7:38711 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731505107129Disabling compacts and flushes for region at 1731505107129Disabling writes for close at 1731505107129Obtaining lock to block concurrent updates at 1731505107129Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731505107129Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731505107129Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731505107130 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731505107130Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731505107148 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731505107148Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731505107158 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731505107170 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731505107170Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731505107179 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731505107195 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731505107195Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731505107208 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731505107222 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731505107222Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f169350: reopening flushed file at 1731505107232 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@40450e42: reopening flushed file at 1731505107237 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@679969b4: reopening flushed file at 1731505107244 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@569b01a7: reopening flushed file at 1731505107251 (+7 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=29, compaction requested=false at 1731505107258 (+7 ms)Writing region close event to WAL at 1731505107259 (+1 ms)Closed at 1731505107259 2024-11-13T13:38:27,260 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:27,260 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:27,260 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:27,260 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:27,260 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T13:38:27,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37879 is added to blk_1073741830_1006 (size=10311) 2024-11-13T13:38:27,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38931 is added to blk_1073741830_1006 (size=10311) 2024-11-13T13:38:27,265 INFO [M:0;bfeb2336aed7:38711 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-13T13:38:27,265 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-13T13:38:27,265 INFO [M:0;bfeb2336aed7:38711 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38711 2024-11-13T13:38:27,265 INFO [M:0;bfeb2336aed7:38711 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T13:38:27,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T13:38:27,370 INFO [M:0;bfeb2336aed7:38711 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T13:38:27,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38711-0x101346b24b80000, quorum=127.0.0.1:58622, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T13:38:27,374 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@49e6dd92{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:38:27,375 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@403020f8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T13:38:27,375 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T13:38:27,375 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@44402286{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T13:38:27,375 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64440bf6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/hadoop.log.dir/,STOPPED} 2024-11-13T13:38:27,377 WARN [BP-1903793704-172.17.0.2-1731505102063 heartbeating to localhost/127.0.0.1:35503 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T13:38:27,377 WARN [BP-1903793704-172.17.0.2-1731505102063 heartbeating to localhost/127.0.0.1:35503 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1903793704-172.17.0.2-1731505102063 (Datanode Uuid 26f69e6d-685a-4c7c-9eaa-cccb4cbd1612) service to localhost/127.0.0.1:35503 2024-11-13T13:38:27,377 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T13:38:27,377 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T13:38:27,378 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/cluster_6b760dc0-6066-0ecf-7b1d-b58b7fe4d93c/data/data3/current/BP-1903793704-172.17.0.2-1731505102063 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:38:27,378 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/cluster_6b760dc0-6066-0ecf-7b1d-b58b7fe4d93c/data/data4/current/BP-1903793704-172.17.0.2-1731505102063 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:38:27,379 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T13:38:27,382 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7fcd61c6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T13:38:27,382 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2ec1c28e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T13:38:27,382 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T13:38:27,382 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@b0e389f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T13:38:27,382 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@413a6699{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/hadoop.log.dir/,STOPPED} 2024-11-13T13:38:27,383 WARN [BP-1903793704-172.17.0.2-1731505102063 heartbeating to localhost/127.0.0.1:35503 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T13:38:27,384 WARN [BP-1903793704-172.17.0.2-1731505102063 heartbeating to localhost/127.0.0.1:35503 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1903793704-172.17.0.2-1731505102063 (Datanode Uuid 21cbae6f-4761-4b70-b554-95e162128341) service to localhost/127.0.0.1:35503 2024-11-13T13:38:27,384 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T13:38:27,384 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T13:38:27,384 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/cluster_6b760dc0-6066-0ecf-7b1d-b58b7fe4d93c/data/data1/current/BP-1903793704-172.17.0.2-1731505102063 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:38:27,384 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/cluster_6b760dc0-6066-0ecf-7b1d-b58b7fe4d93c/data/data2/current/BP-1903793704-172.17.0.2-1731505102063 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T13:38:27,384 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T13:38:27,388 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@35c58925{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T13:38:27,389 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@69a2ae1b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T13:38:27,389 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T13:38:27,389 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35d31c30{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T13:38:27,389 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@717a950c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e1be1b2-7c49-f6df-9c11-a537555bfd83/hadoop.log.dir/,STOPPED} 2024-11-13T13:38:27,393 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-13T13:38:27,408 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-13T13:38:27,416 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=270 (was 230) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35503 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35503 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35503 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35503 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35503 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35503 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:35503 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35503 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=537 (was 512) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=201 (was 174) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4049 (was 4300)