2024-12-02 21:06:29,254 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@55de24cc 2024-12-02 21:06:29,268 main DEBUG Took 0.012010 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-02 21:06:29,268 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-02 21:06:29,269 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-02 21:06:29,270 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-02 21:06:29,271 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 21:06:29,278 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-02 21:06:29,291 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 21:06:29,293 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 21:06:29,294 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 21:06:29,295 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 21:06:29,295 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 21:06:29,296 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 21:06:29,297 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 21:06:29,297 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 21:06:29,297 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 21:06:29,298 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 21:06:29,299 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 21:06:29,299 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 21:06:29,299 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 21:06:29,300 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-02 21:06:29,300 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 21:06:29,300 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 21:06:29,301 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 21:06:29,301 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 21:06:29,301 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 21:06:29,302 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 21:06:29,302 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 21:06:29,302 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 21:06:29,303 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 21:06:29,303 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 21:06:29,303 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 21:06:29,304 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-02 21:06:29,306 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 21:06:29,307 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-02 21:06:29,309 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-02 21:06:29,310 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-02 21:06:29,311 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-02 21:06:29,311 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-02 21:06:29,321 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-02 21:06:29,323 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-02 21:06:29,325 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-02 21:06:29,325 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-02 21:06:29,325 main DEBUG createAppenders(={Console}) 2024-12-02 21:06:29,326 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@55de24cc initialized 2024-12-02 21:06:29,326 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@55de24cc 2024-12-02 21:06:29,326 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@55de24cc OK. 2024-12-02 21:06:29,327 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-02 21:06:29,327 main DEBUG OutputStream closed 2024-12-02 21:06:29,327 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-02 21:06:29,328 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-02 21:06:29,328 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@53ce1329 OK 2024-12-02 21:06:29,388 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-02 21:06:29,390 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-02 21:06:29,391 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-02 21:06:29,392 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-02 21:06:29,392 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-02 21:06:29,392 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-02 21:06:29,392 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-02 21:06:29,393 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-02 21:06:29,393 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-02 21:06:29,393 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-02 21:06:29,394 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-02 21:06:29,394 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-02 21:06:29,394 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-02 21:06:29,395 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-02 21:06:29,395 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-02 21:06:29,395 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-02 21:06:29,395 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-02 21:06:29,396 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-02 21:06:29,398 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-02 21:06:29,398 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-12-02 21:06:29,398 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-02 21:06:29,399 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-12-02T21:06:29,612 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd 2024-12-02 21:06:29,615 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-02 21:06:29,615 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-02T21:06:29,623 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-12-02T21:06:29,655 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=286, MaxFileDescriptor=1048576, SystemLoadAverage=294, ProcessCount=11, AvailableMemoryMB=8198 2024-12-02T21:06:29,658 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-02T21:06:29,660 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/cluster_67c3d1bd-51e2-48dc-a4c9-8f1d1435c878, deleteOnExit=true 2024-12-02T21:06:29,661 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-02T21:06:29,661 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/test.cache.data in system properties and HBase conf 2024-12-02T21:06:29,662 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/hadoop.tmp.dir in system properties and HBase conf 2024-12-02T21:06:29,662 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/hadoop.log.dir in system properties and HBase conf 2024-12-02T21:06:29,663 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-02T21:06:29,663 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-02T21:06:29,663 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-02T21:06:29,742 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-02T21:06:29,816 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-02T21:06:29,819 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-02T21:06:29,819 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-02T21:06:29,820 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-02T21:06:29,820 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T21:06:29,821 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-02T21:06:29,821 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-02T21:06:29,822 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T21:06:29,822 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T21:06:29,822 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-02T21:06:29,823 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/nfs.dump.dir in system properties and HBase conf 2024-12-02T21:06:29,823 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/java.io.tmpdir in system properties and HBase conf 2024-12-02T21:06:29,823 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T21:06:29,824 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-02T21:06:29,824 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-02T21:06:30,226 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T21:06:30,681 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-02T21:06:30,742 INFO [Time-limited test {}] log.Log(170): Logging initialized @2121ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-02T21:06:30,802 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:06:30,860 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:06:30,877 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:06:30,877 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:06:30,879 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T21:06:30,891 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:06:30,893 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@37896107{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:06:30,894 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c7fe1c5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:06:31,057 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4cd532bc{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/java.io.tmpdir/jetty-localhost-38537-hadoop-hdfs-3_4_1-tests_jar-_-any-1624193180472792762/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T21:06:31,063 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@619b4309{HTTP/1.1, (http/1.1)}{localhost:38537} 2024-12-02T21:06:31,064 INFO [Time-limited test {}] server.Server(415): Started @2444ms 2024-12-02T21:06:31,090 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T21:06:31,539 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:06:31,547 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:06:31,548 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:06:31,548 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:06:31,548 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T21:06:31,549 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4239ce1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:06:31,549 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c2208{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:06:31,642 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@9f8c8e3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/java.io.tmpdir/jetty-localhost-41689-hadoop-hdfs-3_4_1-tests_jar-_-any-11997994700312500643/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:06:31,643 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@49793231{HTTP/1.1, (http/1.1)}{localhost:41689} 2024-12-02T21:06:31,643 INFO [Time-limited test {}] server.Server(415): Started @3023ms 2024-12-02T21:06:31,687 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:06:31,782 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:06:31,788 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:06:31,789 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:06:31,790 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:06:31,790 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T21:06:31,791 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2170f3b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:06:31,791 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49fab02b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:06:31,915 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@63d8281c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/java.io.tmpdir/jetty-localhost-46773-hadoop-hdfs-3_4_1-tests_jar-_-any-1380128449026590900/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:06:31,915 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@55b489f0{HTTP/1.1, (http/1.1)}{localhost:46773} 2024-12-02T21:06:31,915 INFO [Time-limited test {}] server.Server(415): Started @3295ms 2024-12-02T21:06:31,917 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-02T21:06:32,675 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/cluster_67c3d1bd-51e2-48dc-a4c9-8f1d1435c878/dfs/data/data3/current/BP-430769306-172.17.0.2-1733173590309/current, will proceed with Du for space computation calculation, 2024-12-02T21:06:32,675 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/cluster_67c3d1bd-51e2-48dc-a4c9-8f1d1435c878/dfs/data/data4/current/BP-430769306-172.17.0.2-1733173590309/current, will proceed with Du for space computation calculation, 2024-12-02T21:06:32,675 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/cluster_67c3d1bd-51e2-48dc-a4c9-8f1d1435c878/dfs/data/data1/current/BP-430769306-172.17.0.2-1733173590309/current, will proceed with Du for space computation calculation, 2024-12-02T21:06:32,675 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/cluster_67c3d1bd-51e2-48dc-a4c9-8f1d1435c878/dfs/data/data2/current/BP-430769306-172.17.0.2-1733173590309/current, will proceed with Du for space computation calculation, 2024-12-02T21:06:32,706 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T21:06:32,706 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T21:06:32,752 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x94c1dc527894fd67 with lease ID 0xaa7c172499fb80a4: Processing first storage report for DS-f5629455-818a-43eb-b35c-bea4817169a9 from datanode DatanodeRegistration(127.0.0.1:33281, datanodeUuid=ddd49cb0-d098-4313-b5db-0c3aca344988, infoPort=46083, infoSecurePort=0, ipcPort=33159, storageInfo=lv=-57;cid=testClusterID;nsid=536982250;c=1733173590309) 2024-12-02T21:06:32,754 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x94c1dc527894fd67 with lease ID 0xaa7c172499fb80a4: from storage DS-f5629455-818a-43eb-b35c-bea4817169a9 node DatanodeRegistration(127.0.0.1:33281, datanodeUuid=ddd49cb0-d098-4313-b5db-0c3aca344988, infoPort=46083, infoSecurePort=0, ipcPort=33159, storageInfo=lv=-57;cid=testClusterID;nsid=536982250;c=1733173590309), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-02T21:06:32,754 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x48a8a5f0642e1eaa with lease ID 0xaa7c172499fb80a5: Processing first storage report for DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0 from datanode DatanodeRegistration(127.0.0.1:39177, datanodeUuid=50ad25c6-72f2-4a25-862d-e81d42b49b23, infoPort=45619, infoSecurePort=0, ipcPort=39171, storageInfo=lv=-57;cid=testClusterID;nsid=536982250;c=1733173590309) 2024-12-02T21:06:32,754 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x48a8a5f0642e1eaa with lease ID 0xaa7c172499fb80a5: from storage DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0 node DatanodeRegistration(127.0.0.1:39177, datanodeUuid=50ad25c6-72f2-4a25-862d-e81d42b49b23, infoPort=45619, infoSecurePort=0, ipcPort=39171, storageInfo=lv=-57;cid=testClusterID;nsid=536982250;c=1733173590309), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:06:32,755 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x94c1dc527894fd67 with lease ID 0xaa7c172499fb80a4: Processing first storage report for DS-143725e0-7232-4854-8d92-d5b2e91dd923 from datanode DatanodeRegistration(127.0.0.1:33281, datanodeUuid=ddd49cb0-d098-4313-b5db-0c3aca344988, infoPort=46083, infoSecurePort=0, ipcPort=33159, storageInfo=lv=-57;cid=testClusterID;nsid=536982250;c=1733173590309) 2024-12-02T21:06:32,755 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x94c1dc527894fd67 with lease ID 0xaa7c172499fb80a4: from storage DS-143725e0-7232-4854-8d92-d5b2e91dd923 node DatanodeRegistration(127.0.0.1:33281, datanodeUuid=ddd49cb0-d098-4313-b5db-0c3aca344988, infoPort=46083, infoSecurePort=0, ipcPort=33159, storageInfo=lv=-57;cid=testClusterID;nsid=536982250;c=1733173590309), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:06:32,755 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x48a8a5f0642e1eaa with lease ID 0xaa7c172499fb80a5: Processing first storage report for DS-c92aecd6-c6d8-4b7b-bc31-7fef15202d2a from datanode DatanodeRegistration(127.0.0.1:39177, datanodeUuid=50ad25c6-72f2-4a25-862d-e81d42b49b23, infoPort=45619, infoSecurePort=0, ipcPort=39171, storageInfo=lv=-57;cid=testClusterID;nsid=536982250;c=1733173590309) 2024-12-02T21:06:32,755 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x48a8a5f0642e1eaa with lease ID 0xaa7c172499fb80a5: from storage DS-c92aecd6-c6d8-4b7b-bc31-7fef15202d2a node DatanodeRegistration(127.0.0.1:39177, datanodeUuid=50ad25c6-72f2-4a25-862d-e81d42b49b23, infoPort=45619, infoSecurePort=0, ipcPort=39171, storageInfo=lv=-57;cid=testClusterID;nsid=536982250;c=1733173590309), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-02T21:06:32,824 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd 2024-12-02T21:06:32,885 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/cluster_67c3d1bd-51e2-48dc-a4c9-8f1d1435c878/zookeeper_0, clientPort=58068, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/cluster_67c3d1bd-51e2-48dc-a4c9-8f1d1435c878/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/cluster_67c3d1bd-51e2-48dc-a4c9-8f1d1435c878/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-02T21:06:32,895 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=58068 2024-12-02T21:06:32,909 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:06:32,912 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:06:33,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741825_1001 (size=7) 2024-12-02T21:06:33,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741825_1001 (size=7) 2024-12-02T21:06:33,527 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37 with version=8 2024-12-02T21:06:33,528 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/hbase-staging 2024-12-02T21:06:33,625 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-02T21:06:33,844 INFO [Time-limited test {}] client.ConnectionUtils(129): master/7d4f3b9a7081:0 server-side Connection retries=45 2024-12-02T21:06:33,859 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:06:33,859 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T21:06:33,860 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T21:06:33,860 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:06:33,860 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T21:06:33,964 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T21:06:34,011 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-02T21:06:34,018 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-02T21:06:34,021 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T21:06:34,041 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 16660 (auto-detected) 2024-12-02T21:06:34,042 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-02T21:06:34,058 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:39967 2024-12-02T21:06:34,064 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:06:34,066 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:06:34,076 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:39967 connecting to ZooKeeper ensemble=127.0.0.1:58068 2024-12-02T21:06:34,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:399670x0, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T21:06:34,177 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39967-0x1019927e59a0000 connected 2024-12-02T21:06:34,247 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T21:06:34,252 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:06:34,255 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T21:06:34,260 DEBUG 
[Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39967 2024-12-02T21:06:34,260 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39967 2024-12-02T21:06:34,260 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39967 2024-12-02T21:06:34,261 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39967 2024-12-02T21:06:34,261 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39967 2024-12-02T21:06:34,267 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37, hbase.cluster.distributed=false 2024-12-02T21:06:34,324 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/7d4f3b9a7081:0 server-side Connection retries=45 2024-12-02T21:06:34,324 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:06:34,324 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T21:06:34,324 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T21:06:34,324 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:06:34,324 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T21:06:34,326 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T21:06:34,328 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T21:06:34,329 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:41569 2024-12-02T21:06:34,331 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T21:06:34,335 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T21:06:34,336 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:06:34,339 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:06:34,343 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:41569 connecting to ZooKeeper ensemble=127.0.0.1:58068 2024-12-02T21:06:34,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:415690x0, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T21:06:34,354 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:415690x0, quorum=127.0.0.1:58068, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T21:06:34,354 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41569-0x1019927e59a0001 connected 2024-12-02T21:06:34,355 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41569-0x1019927e59a0001, quorum=127.0.0.1:58068, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:06:34,356 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41569-0x1019927e59a0001, quorum=127.0.0.1:58068, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T21:06:34,357 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41569 2024-12-02T21:06:34,358 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41569 2024-12-02T21:06:34,358 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41569 2024-12-02T21:06:34,359 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41569 2024-12-02T21:06:34,360 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41569 2024-12-02T21:06:34,362 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/7d4f3b9a7081,39967,1733173593620 2024-12-02T21:06:34,374 DEBUG [M:0;7d4f3b9a7081:39967 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7d4f3b9a7081:39967 2024-12-02T21:06:34,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1019927e59a0001, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:06:34,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:06:34,378 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7d4f3b9a7081,39967,1733173593620 2024-12-02T21:06:34,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1019927e59a0001, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T21:06:34,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/master 2024-12-02T21:06:34,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1019927e59a0001, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:06:34,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:06:34,403 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T21:06:34,404 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7d4f3b9a7081,39967,1733173593620 from backup master directory 2024-12-02T21:06:34,404 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T21:06:34,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7d4f3b9a7081,39967,1733173593620 2024-12-02T21:06:34,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1019927e59a0001, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:06:34,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:06:34,412 WARN [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-02T21:06:34,412 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7d4f3b9a7081,39967,1733173593620 2024-12-02T21:06:34,415 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-02T21:06:34,416 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-02T21:06:34,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741826_1002 (size=42) 2024-12-02T21:06:34,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741826_1002 (size=42) 2024-12-02T21:06:34,484 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/hbase.id with ID: abd8f968-ff8b-4f3d-80d0-ac051fde3a41 2024-12-02T21:06:34,526 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:06:34,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1019927e59a0001, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:06:34,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:06:34,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741827_1003 (size=196) 2024-12-02T21:06:34,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741827_1003 (size=196) 2024-12-02T21:06:34,593 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T21:06:34,594 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-02T21:06:34,599 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:06:34,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741828_1004 (size=1189) 2024-12-02T21:06:34,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741828_1004 (size=1189) 2024-12-02T21:06:34,642 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/data/master/store 2024-12-02T21:06:34,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741829_1005 (size=34) 2024-12-02T21:06:34,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741829_1005 (size=34) 2024-12-02T21:06:34,661 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-12-02T21:06:34,662 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:06:34,663 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T21:06:34,663 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:06:34,663 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:06:34,663 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T21:06:34,664 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:06:34,664 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:06:34,664 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-02T21:06:34,666 WARN [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/data/master/store/.initializing 2024-12-02T21:06:34,666 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/WALs/7d4f3b9a7081,39967,1733173593620 2024-12-02T21:06:34,680 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7d4f3b9a7081%2C39967%2C1733173593620, suffix=, logDir=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/WALs/7d4f3b9a7081,39967,1733173593620, archiveDir=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/oldWALs, maxLogs=10 2024-12-02T21:06:34,688 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C39967%2C1733173593620.1733173594686 2024-12-02T21:06:34,688 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] util.CommonFSUtils$DfsBuilderUtility(752): Using builder API via reflection for DFS file creation replicate flag. 2024-12-02T21:06:34,689 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] util.CommonFSUtils$DfsBuilderUtility(762): Using builder API via reflection for DFS file creation noLocalWrite flag. 
2024-12-02T21:06:34,705 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/WALs/7d4f3b9a7081,39967,1733173593620/7d4f3b9a7081%2C39967%2C1733173593620.1733173594686 2024-12-02T21:06:34,713 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45619:45619),(127.0.0.1/127.0.0.1:46083:46083)] 2024-12-02T21:06:34,714 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:06:34,715 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:06:34,718 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:06:34,719 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:06:34,753 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:06:34,772 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-02T21:06:34,775 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:06:34,778 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:06:34,778 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:06:34,781 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-02T21:06:34,781 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:06:34,782 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:06:34,782 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:06:34,785 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-02T21:06:34,785 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:06:34,786 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:06:34,786 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:06:34,788 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-02T21:06:34,788 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:06:34,789 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:06:34,793 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:06:34,794 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:06:34,803 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
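The FlushLargeStoresPolicy entry above falls back to memstore-flush-size divided by the number of families because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor. A minimal sketch of setting it explicitly through the public descriptor builder follows, assuming a hypothetical table demo:t1 and a 16 MB bound chosen only for illustration.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class PerFamilyFlushLowerBound {
    public static void main(String[] args) {
        // "demo:t1" and the 16 MB value are illustrative assumptions.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo:t1"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            // Property named in the log entry above; when absent, HBase falls back
            // to region.getMemStoreFlushHeapSize / number of families.
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                      String.valueOf(16L * 1024 * 1024))
            .build();
        System.out.println(td);
    }
}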
2024-12-02T21:06:34,808 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:06:34,812 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:06:34,813 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=733719, jitterRate=-0.06702920794487}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T21:06:34,817 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-02T21:06:34,818 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-02T21:06:34,843 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b779ae4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:06:34,874 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-02T21:06:34,885 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-02T21:06:34,885 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-02T21:06:34,887 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-02T21:06:34,889 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-02T21:06:34,894 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 5 msec 2024-12-02T21:06:34,894 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-02T21:06:34,915 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
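The region-open entry above prints the split-policy chain in effect (SteppingSplitPolicy delegating to IncreasingToUpperBoundRegionSplitPolicy and ConstantSizeRegionSplitPolicy). A minimal sketch of pinning a split policy and max file size on an ordinary table through the descriptor builder; the table name demo:t2 and the 256 MB size are illustrative assumptions, not values taken from this run.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SplitPolicyExample {
    public static void main(String[] args) {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo:t2"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            // Pin the policy the log shows as the innermost delegate.
            .setRegionSplitPolicyClassName(
                "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy")
            // 256 MB max store file size before a split; arbitrary example.
            .setMaxFileSize(256L * 1024 * 1024)
            .build();
        System.out.println(td);
    }
}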
2024-12-02T21:06:34,927 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-02T21:06:34,936 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-02T21:06:34,939 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-02T21:06:34,941 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-02T21:06:34,951 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-02T21:06:34,953 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-02T21:06:34,957 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-02T21:06:34,968 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-02T21:06:34,969 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-02T21:06:34,978 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-02T21:06:34,990 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-02T21:06:34,994 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-02T21:06:35,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T21:06:35,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1019927e59a0001, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T21:06:35,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:06:35,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1019927e59a0001, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-02T21:06:35,004 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=7d4f3b9a7081,39967,1733173593620, sessionid=0x1019927e59a0000, setting cluster-up flag (Was=false) 2024-12-02T21:06:35,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:06:35,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1019927e59a0001, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:06:35,053 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-02T21:06:35,055 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7d4f3b9a7081,39967,1733173593620 2024-12-02T21:06:35,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1019927e59a0001, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:06:35,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:06:35,103 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-02T21:06:35,104 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7d4f3b9a7081,39967,1733173593620 2024-12-02T21:06:35,173 DEBUG [RS:0;7d4f3b9a7081:41569 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7d4f3b9a7081:41569 2024-12-02T21:06:35,174 INFO [RS:0;7d4f3b9a7081:41569 {}] regionserver.HRegionServer(1008): ClusterId : abd8f968-ff8b-4f3d-80d0-ac051fde3a41 2024-12-02T21:06:35,176 DEBUG [RS:0;7d4f3b9a7081:41569 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T21:06:35,179 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-02T21:06:35,184 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-02T21:06:35,186 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
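The ZKUtil entries above probe /hbase/balancer, /hbase/normalizer, /hbase/switch/* and /hbase/running under the quorum at 127.0.0.1:58068. A minimal sketch of performing the same existence checks with the plain ZooKeeper client; the port is this run's ephemeral test port, so a real deployment would use its configured quorum instead.

import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ZNodeCheck {
    public static void main(String[] args) throws Exception {
        // Quorum address copied from the log of this test run.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:58068", 30000, event -> { });
        for (String path : new String[] {
                "/hbase/running", "/hbase/balancer", "/hbase/switch/split"}) {
            Stat stat = zk.exists(path, false);
            System.out.println(path + " -> " + (stat == null ? "absent" : "present"));
        }
        zk.close();
    }
}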
2024-12-02T21:06:35,188 DEBUG [RS:0;7d4f3b9a7081:41569 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T21:06:35,188 DEBUG [RS:0;7d4f3b9a7081:41569 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T21:06:35,191 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7d4f3b9a7081,39967,1733173593620 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-02T21:06:35,194 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7d4f3b9a7081:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:06:35,194 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7d4f3b9a7081:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:06:35,194 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:06:35,194 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:06:35,195 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7d4f3b9a7081:0, corePoolSize=10, maxPoolSize=10 2024-12-02T21:06:35,195 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:06:35,195 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=2, maxPoolSize=2 2024-12-02T21:06:35,195 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:06:35,196 DEBUG [RS:0;7d4f3b9a7081:41569 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T21:06:35,196 DEBUG [RS:0;7d4f3b9a7081:41569 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34eefd76, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:06:35,197 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733173625196 2024-12-02T21:06:35,198 DEBUG [RS:0;7d4f3b9a7081:41569 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d40df23, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7d4f3b9a7081/172.17.0.2:0 2024-12-02T21:06:35,198 INFO 
[master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-02T21:06:35,199 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-02T21:06:35,200 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:06:35,201 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-02T21:06:35,201 INFO [RS:0;7d4f3b9a7081:41569 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-02T21:06:35,201 INFO [RS:0;7d4f3b9a7081:41569 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-02T21:06:35,201 DEBUG [RS:0;7d4f3b9a7081:41569 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-02T21:06:35,202 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-02T21:06:35,203 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-02T21:06:35,203 INFO [RS:0;7d4f3b9a7081:41569 {}] regionserver.HRegionServer(3073): reportForDuty to master=7d4f3b9a7081,39967,1733173593620 with isa=7d4f3b9a7081/172.17.0.2:41569, startcode=1733173594323 2024-12-02T21:06:35,203 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-02T21:06:35,203 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-02T21:06:35,204 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-02T21:06:35,205 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:06:35,205 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T21:06:35,206 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-02T21:06:35,207 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-02T21:06:35,208 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-02T21:06:35,211 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-02T21:06:35,211 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-02T21:06:35,212 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.large.0-1733173595212,5,FailOnTimeoutGroup] 2024-12-02T21:06:35,212 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.small.0-1733173595212,5,FailOnTimeoutGroup] 2024-12-02T21:06:35,212 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T21:06:35,213 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-02T21:06:35,214 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 
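The FSTableDescriptors entry above prints the hbase:meta 'info' family with ROW_INDEX_V1 encoding, a ROWCOL bloom filter, in-memory caching and 8 KB blocks. The same attributes can be expressed for a user table through the public builder API; the table name demo:meta_like below is an illustrative assumption, not an attempt to recreate meta.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class InfoFamilyExample {
    public static void main(String[] args) {
        // Attribute values mirror the descriptor printed in the log entry above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build();
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo:meta_like"))
            .setColumnFamily(info)
            .build();
        System.out.println(td);
    }
}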
2024-12-02T21:06:35,214 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-02T21:06:35,215 DEBUG [RS:0;7d4f3b9a7081:41569 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T21:06:35,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741831_1007 (size=1039) 2024-12-02T21:06:35,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741831_1007 (size=1039) 2024-12-02T21:06:35,221 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-02T21:06:35,221 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37 2024-12-02T21:06:35,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741832_1008 (size=32) 2024-12-02T21:06:35,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741832_1008 (size=32) 2024-12-02T21:06:35,275 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33337, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T21:06:35,280 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39967 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 7d4f3b9a7081,41569,1733173594323 2024-12-02T21:06:35,282 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39967 {}] master.ServerManager(486): Registering regionserver=7d4f3b9a7081,41569,1733173594323 2024-12-02T21:06:35,295 DEBUG [RS:0;7d4f3b9a7081:41569 {}] regionserver.HRegionServer(1725): Config from master: 
hbase.rootdir=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37 2024-12-02T21:06:35,295 DEBUG [RS:0;7d4f3b9a7081:41569 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:39905 2024-12-02T21:06:35,295 DEBUG [RS:0;7d4f3b9a7081:41569 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-02T21:06:35,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T21:06:35,304 DEBUG [RS:0;7d4f3b9a7081:41569 {}] zookeeper.ZKUtil(111): regionserver:41569-0x1019927e59a0001, quorum=127.0.0.1:58068, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7d4f3b9a7081,41569,1733173594323 2024-12-02T21:06:35,304 WARN [RS:0;7d4f3b9a7081:41569 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T21:06:35,304 INFO [RS:0;7d4f3b9a7081:41569 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:06:35,304 DEBUG [RS:0;7d4f3b9a7081:41569 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323 2024-12-02T21:06:35,306 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7d4f3b9a7081,41569,1733173594323] 2024-12-02T21:06:35,318 DEBUG [RS:0;7d4f3b9a7081:41569 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-02T21:06:35,328 INFO [RS:0;7d4f3b9a7081:41569 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T21:06:35,338 INFO [RS:0;7d4f3b9a7081:41569 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T21:06:35,340 INFO [RS:0;7d4f3b9a7081:41569 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T21:06:35,340 INFO [RS:0;7d4f3b9a7081:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:06:35,341 INFO [RS:0;7d4f3b9a7081:41569 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-02T21:06:35,346 INFO [RS:0;7d4f3b9a7081:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-02T21:06:35,346 DEBUG [RS:0;7d4f3b9a7081:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:06:35,346 DEBUG [RS:0;7d4f3b9a7081:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:06:35,346 DEBUG [RS:0;7d4f3b9a7081:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:06:35,346 DEBUG [RS:0;7d4f3b9a7081:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:06:35,347 DEBUG [RS:0;7d4f3b9a7081:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:06:35,347 DEBUG [RS:0;7d4f3b9a7081:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7d4f3b9a7081:0, corePoolSize=2, maxPoolSize=2 2024-12-02T21:06:35,347 DEBUG [RS:0;7d4f3b9a7081:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:06:35,347 DEBUG [RS:0;7d4f3b9a7081:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:06:35,347 DEBUG [RS:0;7d4f3b9a7081:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:06:35,347 DEBUG [RS:0;7d4f3b9a7081:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:06:35,347 DEBUG [RS:0;7d4f3b9a7081:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:06:35,347 DEBUG [RS:0;7d4f3b9a7081:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7d4f3b9a7081:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:06:35,348 DEBUG [RS:0;7d4f3b9a7081:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:06:35,348 INFO [RS:0;7d4f3b9a7081:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T21:06:35,348 INFO [RS:0;7d4f3b9a7081:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T21:06:35,348 INFO [RS:0;7d4f3b9a7081:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T21:06:35,348 INFO [RS:0;7d4f3b9a7081:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T21:06:35,348 INFO [RS:0;7d4f3b9a7081:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,41569,1733173594323-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-02T21:06:35,362 INFO [RS:0;7d4f3b9a7081:41569 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T21:06:35,363 INFO [RS:0;7d4f3b9a7081:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,41569,1733173594323-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:06:35,380 INFO [RS:0;7d4f3b9a7081:41569 {}] regionserver.Replication(204): 7d4f3b9a7081,41569,1733173594323 started 2024-12-02T21:06:35,380 INFO [RS:0;7d4f3b9a7081:41569 {}] regionserver.HRegionServer(1767): Serving as 7d4f3b9a7081,41569,1733173594323, RpcServer on 7d4f3b9a7081/172.17.0.2:41569, sessionid=0x1019927e59a0001 2024-12-02T21:06:35,381 DEBUG [RS:0;7d4f3b9a7081:41569 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T21:06:35,381 DEBUG [RS:0;7d4f3b9a7081:41569 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7d4f3b9a7081,41569,1733173594323 2024-12-02T21:06:35,381 DEBUG [RS:0;7d4f3b9a7081:41569 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7d4f3b9a7081,41569,1733173594323' 2024-12-02T21:06:35,381 DEBUG [RS:0;7d4f3b9a7081:41569 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T21:06:35,382 DEBUG [RS:0;7d4f3b9a7081:41569 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T21:06:35,383 DEBUG [RS:0;7d4f3b9a7081:41569 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T21:06:35,383 DEBUG [RS:0;7d4f3b9a7081:41569 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T21:06:35,383 DEBUG [RS:0;7d4f3b9a7081:41569 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7d4f3b9a7081,41569,1733173594323 2024-12-02T21:06:35,383 DEBUG [RS:0;7d4f3b9a7081:41569 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7d4f3b9a7081,41569,1733173594323' 2024-12-02T21:06:35,383 DEBUG [RS:0;7d4f3b9a7081:41569 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T21:06:35,384 DEBUG [RS:0;7d4f3b9a7081:41569 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T21:06:35,384 DEBUG [RS:0;7d4f3b9a7081:41569 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T21:06:35,384 INFO [RS:0;7d4f3b9a7081:41569 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T21:06:35,385 INFO [RS:0;7d4f3b9a7081:41569 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
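With the region server registered and serving, a client would reach this cluster through the ZooKeeper quorum seen earlier in the log. A minimal connection sketch, assuming the run's ephemeral quorum address 127.0.0.1:58068; a real client would read these values from hbase-site.xml instead of hard-coding them.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterProbe {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Quorum host and client port taken from this test run's log.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "58068");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            System.out.println("meta available: "
                + admin.tableExists(TableName.valueOf("hbase:meta")));
        }
    }
}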
2024-12-02T21:06:35,497 INFO [RS:0;7d4f3b9a7081:41569 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7d4f3b9a7081%2C41569%2C1733173594323, suffix=, logDir=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323, archiveDir=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/oldWALs, maxLogs=32 2024-12-02T21:06:35,500 INFO [RS:0;7d4f3b9a7081:41569 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C41569%2C1733173594323.1733173595500 2024-12-02T21:06:35,508 INFO [RS:0;7d4f3b9a7081:41569 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173595500 2024-12-02T21:06:35,508 DEBUG [RS:0;7d4f3b9a7081:41569 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46083:46083),(127.0.0.1/127.0.0.1:45619:45619)] 2024-12-02T21:06:35,639 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:06:35,643 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T21:06:35,646 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T21:06:35,647 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:06:35,648 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:06:35,648 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T21:06:35,651 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: 
max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T21:06:35,652 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:06:35,653 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:06:35,653 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T21:06:35,656 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T21:06:35,657 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:06:35,658 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:06:35,659 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/hbase/meta/1588230740 2024-12-02T21:06:35,660 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/hbase/meta/1588230740 2024-12-02T21:06:35,663 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-02T21:06:35,665 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-02T21:06:35,669 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:06:35,669 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=771031, jitterRate=-0.019583821296691895}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T21:06:35,671 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-02T21:06:35,671 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-02T21:06:35,671 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-02T21:06:35,671 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-02T21:06:35,672 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T21:06:35,672 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T21:06:35,673 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-02T21:06:35,673 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-02T21:06:35,675 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:06:35,675 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-02T21:06:35,679 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-02T21:06:35,685 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T21:06:35,687 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-02T21:06:35,840 DEBUG [7d4f3b9a7081:39967 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-02T21:06:35,847 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7d4f3b9a7081,41569,1733173594323 2024-12-02T21:06:35,853 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7d4f3b9a7081,41569,1733173594323, state=OPENING 2024-12-02T21:06:35,895 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-02T21:06:35,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:41569-0x1019927e59a0001, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:06:35,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:06:35,905 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:06:35,905 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:06:35,908 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=7d4f3b9a7081,41569,1733173594323}] 2024-12-02T21:06:36,088 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7d4f3b9a7081,41569,1733173594323 2024-12-02T21:06:36,089 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T21:06:36,092 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48682, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T21:06:36,101 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-02T21:06:36,102 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:06:36,105 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7d4f3b9a7081%2C41569%2C1733173594323.meta, suffix=.meta, logDir=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323, archiveDir=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/oldWALs, maxLogs=32 2024-12-02T21:06:36,107 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C41569%2C1733173594323.meta.1733173596107.meta 2024-12-02T21:06:36,115 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.meta.1733173596107.meta 2024-12-02T21:06:36,115 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45619:45619),(127.0.0.1/127.0.0.1:46083:46083)] 2024-12-02T21:06:36,116 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:06:36,117 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class 
org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-02T21:06:36,163 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-02T21:06:36,166 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-02T21:06:36,170 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-02T21:06:36,170 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:06:36,170 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-02T21:06:36,170 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-02T21:06:36,173 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T21:06:36,174 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T21:06:36,175 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:06:36,175 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:06:36,176 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T21:06:36,177 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T21:06:36,177 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:06:36,178 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:06:36,178 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T21:06:36,179 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T21:06:36,179 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:06:36,180 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:06:36,182 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/hbase/meta/1588230740 2024-12-02T21:06:36,184 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/hbase/meta/1588230740 2024-12-02T21:06:36,187 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-02T21:06:36,190 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-02T21:06:36,192 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=744493, jitterRate=-0.05332861840724945}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T21:06:36,194 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-02T21:06:36,200 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733173596082 2024-12-02T21:06:36,209 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-02T21:06:36,210 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-02T21:06:36,210 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7d4f3b9a7081,41569,1733173594323 2024-12-02T21:06:36,212 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7d4f3b9a7081,41569,1733173594323, state=OPEN 2024-12-02T21:06:36,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1019927e59a0001, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T21:06:36,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T21:06:36,330 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:06:36,330 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:06:36,339 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-02T21:06:36,339 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=7d4f3b9a7081,41569,1733173594323 in 422 msec 2024-12-02T21:06:36,346 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-02T21:06:36,346 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 661 msec 2024-12-02T21:06:36,351 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.2100 sec 2024-12-02T21:06:36,351 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region 
servers to report in: status=status unset, state=RUNNING, startTime=1733173596351, completionTime=-1 2024-12-02T21:06:36,351 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-02T21:06:36,351 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-02T21:06:36,383 DEBUG [hconnection-0x1eb6fa4-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T21:06:36,385 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48688, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T21:06:36,393 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-02T21:06:36,394 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733173656394 2024-12-02T21:06:36,394 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733173716394 2024-12-02T21:06:36,394 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 42 msec 2024-12-02T21:06:36,430 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,39967,1733173593620-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:06:36,431 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,39967,1733173593620-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:06:36,431 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,39967,1733173593620-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:06:36,432 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7d4f3b9a7081:39967, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:06:36,433 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-02T21:06:36,438 DEBUG [master/7d4f3b9a7081:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-02T21:06:36,441 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-12-02T21:06:36,442 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T21:06:36,447 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-02T21:06:36,450 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T21:06:36,451 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:06:36,452 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T21:06:36,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741835_1011 (size=358) 2024-12-02T21:06:36,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741835_1011 (size=358) 2024-12-02T21:06:36,467 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 53ecb1025a6e96f65e8ed6c8adf597f0, NAME => 'hbase:namespace,,1733173596441.53ecb1025a6e96f65e8ed6c8adf597f0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37 2024-12-02T21:06:36,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741836_1012 (size=42) 2024-12-02T21:06:36,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741836_1012 (size=42) 2024-12-02T21:06:36,478 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733173596441.53ecb1025a6e96f65e8ed6c8adf597f0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:06:36,479 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 53ecb1025a6e96f65e8ed6c8adf597f0, disabling compactions & flushes 2024-12-02T21:06:36,479 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733173596441.53ecb1025a6e96f65e8ed6c8adf597f0. 
2024-12-02T21:06:36,479 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733173596441.53ecb1025a6e96f65e8ed6c8adf597f0. 2024-12-02T21:06:36,479 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733173596441.53ecb1025a6e96f65e8ed6c8adf597f0. after waiting 0 ms 2024-12-02T21:06:36,479 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733173596441.53ecb1025a6e96f65e8ed6c8adf597f0. 2024-12-02T21:06:36,479 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733173596441.53ecb1025a6e96f65e8ed6c8adf597f0. 2024-12-02T21:06:36,479 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 53ecb1025a6e96f65e8ed6c8adf597f0: 2024-12-02T21:06:36,481 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T21:06:36,487 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733173596441.53ecb1025a6e96f65e8ed6c8adf597f0.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733173596482"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733173596482"}]},"ts":"1733173596482"} 2024-12-02T21:06:36,508 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-02T21:06:36,511 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T21:06:36,513 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733173596511"}]},"ts":"1733173596511"} 2024-12-02T21:06:36,517 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-02T21:06:36,538 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=53ecb1025a6e96f65e8ed6c8adf597f0, ASSIGN}] 2024-12-02T21:06:36,540 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=53ecb1025a6e96f65e8ed6c8adf597f0, ASSIGN 2024-12-02T21:06:36,542 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=53ecb1025a6e96f65e8ed6c8adf597f0, ASSIGN; state=OFFLINE, location=7d4f3b9a7081,41569,1733173594323; forceNewPlan=false, retain=false 2024-12-02T21:06:36,693 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=53ecb1025a6e96f65e8ed6c8adf597f0, regionState=OPENING, regionLocation=7d4f3b9a7081,41569,1733173594323 2024-12-02T21:06:36,700 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; 
OpenRegionProcedure 53ecb1025a6e96f65e8ed6c8adf597f0, server=7d4f3b9a7081,41569,1733173594323}] 2024-12-02T21:06:36,857 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7d4f3b9a7081,41569,1733173594323 2024-12-02T21:06:36,869 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733173596441.53ecb1025a6e96f65e8ed6c8adf597f0. 2024-12-02T21:06:36,870 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 53ecb1025a6e96f65e8ed6c8adf597f0, NAME => 'hbase:namespace,,1733173596441.53ecb1025a6e96f65e8ed6c8adf597f0.', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:06:36,871 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 53ecb1025a6e96f65e8ed6c8adf597f0 2024-12-02T21:06:36,871 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733173596441.53ecb1025a6e96f65e8ed6c8adf597f0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:06:36,871 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 53ecb1025a6e96f65e8ed6c8adf597f0 2024-12-02T21:06:36,871 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 53ecb1025a6e96f65e8ed6c8adf597f0 2024-12-02T21:06:36,873 INFO [StoreOpener-53ecb1025a6e96f65e8ed6c8adf597f0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 53ecb1025a6e96f65e8ed6c8adf597f0 2024-12-02T21:06:36,876 INFO [StoreOpener-53ecb1025a6e96f65e8ed6c8adf597f0-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 53ecb1025a6e96f65e8ed6c8adf597f0 columnFamilyName info 2024-12-02T21:06:36,876 DEBUG [StoreOpener-53ecb1025a6e96f65e8ed6c8adf597f0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:06:36,876 INFO [StoreOpener-53ecb1025a6e96f65e8ed6c8adf597f0-1 {}] regionserver.HStore(327): Store=53ecb1025a6e96f65e8ed6c8adf597f0/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, 
compression=NONE 2024-12-02T21:06:36,878 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/hbase/namespace/53ecb1025a6e96f65e8ed6c8adf597f0 2024-12-02T21:06:36,879 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/hbase/namespace/53ecb1025a6e96f65e8ed6c8adf597f0 2024-12-02T21:06:36,882 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 53ecb1025a6e96f65e8ed6c8adf597f0 2024-12-02T21:06:36,886 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/hbase/namespace/53ecb1025a6e96f65e8ed6c8adf597f0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:06:36,887 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 53ecb1025a6e96f65e8ed6c8adf597f0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=801661, jitterRate=0.019365057349205017}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T21:06:36,889 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 53ecb1025a6e96f65e8ed6c8adf597f0: 2024-12-02T21:06:36,891 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733173596441.53ecb1025a6e96f65e8ed6c8adf597f0., pid=6, masterSystemTime=1733173596857 2024-12-02T21:06:36,894 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733173596441.53ecb1025a6e96f65e8ed6c8adf597f0. 2024-12-02T21:06:36,894 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733173596441.53ecb1025a6e96f65e8ed6c8adf597f0. 
2024-12-02T21:06:36,895 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=53ecb1025a6e96f65e8ed6c8adf597f0, regionState=OPEN, openSeqNum=2, regionLocation=7d4f3b9a7081,41569,1733173594323 2024-12-02T21:06:36,902 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-02T21:06:36,903 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 53ecb1025a6e96f65e8ed6c8adf597f0, server=7d4f3b9a7081,41569,1733173594323 in 200 msec 2024-12-02T21:06:36,905 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-02T21:06:36,905 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=53ecb1025a6e96f65e8ed6c8adf597f0, ASSIGN in 365 msec 2024-12-02T21:06:36,906 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T21:06:36,907 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733173596907"}]},"ts":"1733173596907"} 2024-12-02T21:06:36,910 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-02T21:06:36,947 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T21:06:36,951 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-02T21:06:36,953 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 504 msec 2024-12-02T21:06:36,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1019927e59a0001, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:06:36,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-02T21:06:36,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:06:36,987 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-02T21:06:37,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-02T21:06:37,028 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 39 msec 2024-12-02T21:06:37,033 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-02T21:06:37,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-02T21:06:37,066 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 32 msec 2024-12-02T21:06:37,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-02T21:06:37,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-02T21:06:37,112 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 2.700sec 2024-12-02T21:06:37,115 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-02T21:06:37,118 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-02T21:06:37,119 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-02T21:06:37,120 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-02T21:06:37,120 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-02T21:06:37,121 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,39967,1733173593620-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T21:06:37,121 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,39967,1733173593620-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-02T21:06:37,128 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-02T21:06:37,129 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-02T21:06:37,129 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,39967,1733173593620-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-02T21:06:37,196 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x744a1e4d to 127.0.0.1:58068 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2bc86121 2024-12-02T21:06:37,197 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-02T21:06:37,211 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e9efc37, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:06:37,214 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-02T21:06:37,214 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-02T21:06:37,225 DEBUG [hconnection-0x663a2910-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T21:06:37,251 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48704, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T21:06:37,262 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=7d4f3b9a7081,39967,1733173593620 2024-12-02T21:06:37,262 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:06:37,269 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-02T21:06:37,275 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T21:06:37,278 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55364, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T21:06:37,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39967 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-02T21:06:37,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39967 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-02T21:06:37,286 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39967 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T21:06:37,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39967 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-12-02T21:06:37,289 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T21:06:37,290 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:06:37,291 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T21:06:37,291 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39967 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 9 2024-12-02T21:06:37,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39967 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-02T21:06:37,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741837_1013 (size=389) 2024-12-02T21:06:37,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741837_1013 (size=389) 2024-12-02T21:06:37,305 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 6ab24299f8b9a2ccc778b34be8469b97, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37 2024-12-02T21:06:37,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741838_1014 (size=72) 2024-12-02T21:06:37,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741838_1014 (size=72) 2024-12-02T21:06:37,317 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 
{}] regionserver.HRegion(894): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:06:37,317 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1681): Closing 6ab24299f8b9a2ccc778b34be8469b97, disabling compactions & flushes 2024-12-02T21:06:37,317 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97. 2024-12-02T21:06:37,317 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97. 2024-12-02T21:06:37,317 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97. after waiting 0 ms 2024-12-02T21:06:37,317 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97. 2024-12-02T21:06:37,317 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97. 2024-12-02T21:06:37,317 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1635): Region close journal for 6ab24299f8b9a2ccc778b34be8469b97: 2024-12-02T21:06:37,319 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T21:06:37,319 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1733173597319"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733173597319"}]},"ts":"1733173597319"} 2024-12-02T21:06:37,322 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-02T21:06:37,323 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T21:06:37,323 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733173597323"}]},"ts":"1733173597323"} 2024-12-02T21:06:37,325 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-12-02T21:06:37,344 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=6ab24299f8b9a2ccc778b34be8469b97, ASSIGN}] 2024-12-02T21:06:37,347 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=6ab24299f8b9a2ccc778b34be8469b97, ASSIGN 2024-12-02T21:06:37,349 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=6ab24299f8b9a2ccc778b34be8469b97, ASSIGN; state=OFFLINE, location=7d4f3b9a7081,41569,1733173594323; forceNewPlan=false, retain=false 2024-12-02T21:06:37,500 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=6ab24299f8b9a2ccc778b34be8469b97, regionState=OPENING, regionLocation=7d4f3b9a7081,41569,1733173594323 2024-12-02T21:06:37,507 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 6ab24299f8b9a2ccc778b34be8469b97, server=7d4f3b9a7081,41569,1733173594323}] 2024-12-02T21:06:37,664 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7d4f3b9a7081,41569,1733173594323 2024-12-02T21:06:37,675 INFO [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97. 
2024-12-02T21:06:37,676 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 6ab24299f8b9a2ccc778b34be8469b97, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97.', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:06:37,676 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 6ab24299f8b9a2ccc778b34be8469b97 2024-12-02T21:06:37,676 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:06:37,677 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 6ab24299f8b9a2ccc778b34be8469b97 2024-12-02T21:06:37,677 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 6ab24299f8b9a2ccc778b34be8469b97 2024-12-02T21:06:37,679 INFO [StoreOpener-6ab24299f8b9a2ccc778b34be8469b97-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 6ab24299f8b9a2ccc778b34be8469b97 2024-12-02T21:06:37,681 INFO [StoreOpener-6ab24299f8b9a2ccc778b34be8469b97-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6ab24299f8b9a2ccc778b34be8469b97 columnFamilyName info 2024-12-02T21:06:37,681 DEBUG [StoreOpener-6ab24299f8b9a2ccc778b34be8469b97-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:06:37,683 INFO [StoreOpener-6ab24299f8b9a2ccc778b34be8469b97-1 {}] regionserver.HStore(327): Store=6ab24299f8b9a2ccc778b34be8469b97/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:06:37,684 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97 2024-12-02T21:06:37,685 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] 
regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97 2024-12-02T21:06:37,688 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 6ab24299f8b9a2ccc778b34be8469b97 2024-12-02T21:06:37,691 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:06:37,692 INFO [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 6ab24299f8b9a2ccc778b34be8469b97; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=786670, jitterRate=3.03804874420166E-4}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T21:06:37,693 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 6ab24299f8b9a2ccc778b34be8469b97: 2024-12-02T21:06:37,695 INFO [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97., pid=11, masterSystemTime=1733173597664 2024-12-02T21:06:37,698 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97. 2024-12-02T21:06:37,698 INFO [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97. 
2024-12-02T21:06:37,699 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=6ab24299f8b9a2ccc778b34be8469b97, regionState=OPEN, openSeqNum=2, regionLocation=7d4f3b9a7081,41569,1733173594323 2024-12-02T21:06:37,705 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-02T21:06:37,706 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 6ab24299f8b9a2ccc778b34be8469b97, server=7d4f3b9a7081,41569,1733173594323 in 194 msec 2024-12-02T21:06:37,708 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-02T21:06:37,708 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=6ab24299f8b9a2ccc778b34be8469b97, ASSIGN in 361 msec 2024-12-02T21:06:37,709 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T21:06:37,709 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733173597709"}]},"ts":"1733173597709"} 2024-12-02T21:06:37,712 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-12-02T21:06:37,755 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T21:06:37,758 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 469 msec 2024-12-02T21:06:41,680 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-02T21:06:41,721 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-02T21:06:41,721 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-02T21:06:41,722 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-12-02T21:06:44,009 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-02T21:06:44,009 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-02T21:06:44,014 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-02T21:06:44,014 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-02T21:06:44,018 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-02T21:06:44,018 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-02T21:06:44,020 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T21:06:44,020 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-02T21:06:44,020 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-02T21:06:44,020 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-02T21:06:47,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39967 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-02T21:06:47,307 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling, procId: 9 completed 2024-12-02T21:06:47,310 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-12-02T21:06:47,311 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97. 
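The create request logged for 'TestLogRolling-testSlowSyncLogRolling' (a single 'info' family with VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => 64 KB, no compression) corresponds to an ordinary client-side table creation. A minimal sketch using the HBase 2.x client API, assuming a Connection to this cluster is already available; only the attributes visible in the logged descriptor are set, the rest stays at defaults:

```java
import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTable {
    public static void create(Connection connection) throws IOException {
        TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        // Mirror the column-family attributes shown in the create log record.
        ColumnFamilyDescriptorBuilder cf =
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(1)                 // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
                .setBlocksize(64 * 1024);          // BLOCKSIZE => '65536 B (64KB)'
        TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(name)
            .setColumnFamily(cf.build());
        try (Admin admin = connection.getAdmin()) {
            // Drives the CreateTableProcedure / assignment sequence seen in the log.
            admin.createTable(table.build());
        }
    }
}
```

The client call returns once procedure pid=9 completes, which is what the "Operation: CREATE ... procId: 9 completed" record above reflects.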
2024-12-02T21:06:47,311 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C41569%2C1733173594323.1733173607311 2024-12-02T21:06:47,320 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173595500 with entries=4, filesize=947 B; new WAL /user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173607311 2024-12-02T21:06:47,321 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46083:46083),(127.0.0.1/127.0.0.1:45619:45619)] 2024-12-02T21:06:47,321 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173595500 is not closed yet, will try archiving it next time 2024-12-02T21:06:47,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741833_1009 (size=955) 2024-12-02T21:06:47,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741833_1009 (size=955) 2024-12-02T21:06:59,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41569 {}] regionserver.HRegion(8581): Flush requested on 6ab24299f8b9a2ccc778b34be8469b97 2024-12-02T21:06:59,369 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6ab24299f8b9a2ccc778b34be8469b97 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-02T21:06:59,422 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/.tmp/info/c2078ecf498445139bc1de1aaad66427 is 1080, key is row0001/info:/1733173607326/Put/seqid=0 2024-12-02T21:06:59,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741840_1016 (size=12509) 2024-12-02T21:06:59,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741840_1016 (size=12509) 2024-12-02T21:06:59,433 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/.tmp/info/c2078ecf498445139bc1de1aaad66427 2024-12-02T21:06:59,474 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/.tmp/info/c2078ecf498445139bc1de1aaad66427 as hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/info/c2078ecf498445139bc1de1aaad66427 2024-12-02T21:06:59,484 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/info/c2078ecf498445139bc1de1aaad66427, entries=7, sequenceid=11, filesize=12.2 K 2024-12-02T21:06:59,487 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 6ab24299f8b9a2ccc778b34be8469b97 in 118ms, sequenceid=11, compaction requested=false 2024-12-02T21:06:59,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6ab24299f8b9a2ccc778b34be8469b97: 2024-12-02T21:07:02,820 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T21:07:06,203 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T21:07:06,207 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40154, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T21:07:07,390 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C41569%2C1733173594323.1733173627390 2024-12-02T21:07:07,608 INFO [Time-limited test {}] wal.AbstractFSWAL(1183): Slow sync cost: 213 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK], DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK]] 2024-12-02T21:07:07,609 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173607311 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173627390 2024-12-02T21:07:07,609 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45619:45619),(127.0.0.1/127.0.0.1:46083:46083)] 2024-12-02T21:07:07,610 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173607311 is not closed yet, will try archiving it next time 2024-12-02T21:07:07,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741839_1015 (size=12399) 2024-12-02T21:07:07,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741839_1015 (size=12399) 2024-12-02T21:07:07,813 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK], DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK]] 2024-12-02T21:07:10,019 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK], 
DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK]] 2024-12-02T21:07:12,226 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK], DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK]] 2024-12-02T21:07:14,432 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK], DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK]] 2024-12-02T21:07:14,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41569 {}] regionserver.HRegion(8581): Flush requested on 6ab24299f8b9a2ccc778b34be8469b97 2024-12-02T21:07:14,433 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6ab24299f8b9a2ccc778b34be8469b97 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-02T21:07:14,635 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK], DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK]] 2024-12-02T21:07:14,643 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/.tmp/info/95e71a88dce047778c6fb77c8f3926be is 1080, key is row0008/info:/1733173621372/Put/seqid=0 2024-12-02T21:07:14,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741842_1018 (size=12509) 2024-12-02T21:07:14,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741842_1018 (size=12509) 2024-12-02T21:07:14,653 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/.tmp/info/95e71a88dce047778c6fb77c8f3926be 2024-12-02T21:07:14,665 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/.tmp/info/95e71a88dce047778c6fb77c8f3926be as hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/info/95e71a88dce047778c6fb77c8f3926be 2024-12-02T21:07:14,675 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/info/95e71a88dce047778c6fb77c8f3926be, entries=7, sequenceid=21, filesize=12.2 K 2024-12-02T21:07:14,877 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK], DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK]] 
2024-12-02T21:07:14,878 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 6ab24299f8b9a2ccc778b34be8469b97 in 445ms, sequenceid=21, compaction requested=false 2024-12-02T21:07:14,878 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6ab24299f8b9a2ccc778b34be8469b97: 2024-12-02T21:07:14,879 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=24.4 K, sizeToCheck=16.0 K 2024-12-02T21:07:14,879 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:07:14,882 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/info/c2078ecf498445139bc1de1aaad66427 because midkey is the same as first or last row 2024-12-02T21:07:16,638 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK], DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK]] 2024-12-02T21:07:18,076 INFO [master/7d4f3b9a7081:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-02T21:07:18,077 INFO [master/7d4f3b9a7081:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-02T21:07:18,845 WARN [sync.1 {}] wal.AbstractFSWAL(1346): Requesting log roll because we exceeded slow sync threshold; count=7, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK], DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK]] 2024-12-02T21:07:18,848 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 7d4f3b9a7081%2C41569%2C1733173594323:(num 1733173627390) roll requested 2024-12-02T21:07:18,849 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK], DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK]] 2024-12-02T21:07:18,849 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C41569%2C1733173594323.1733173638849 2024-12-02T21:07:19,065 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(1183): Slow sync cost: 211 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK], DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK]] 2024-12-02T21:07:19,265 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK], DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK]] 2024-12-02T21:07:19,266 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173627390 with entries=8, filesize=7.55 KB; new WAL 
/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173638849 2024-12-02T21:07:19,266 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45619:45619),(127.0.0.1/127.0.0.1:46083:46083)] 2024-12-02T21:07:19,267 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173627390 is not closed yet, will try archiving it next time 2024-12-02T21:07:19,268 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173607311 to hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/oldWALs/7d4f3b9a7081%2C41569%2C1733173594323.1733173607311 2024-12-02T21:07:19,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741841_1017 (size=7739) 2024-12-02T21:07:19,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741841_1017 (size=7739) 2024-12-02T21:07:21,051 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK], DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK]] 2024-12-02T21:07:22,677 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 6ab24299f8b9a2ccc778b34be8469b97, had cached 0 bytes from a total of 25018 2024-12-02T21:07:23,256 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK], DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK]] 2024-12-02T21:07:25,461 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK], DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK]] 2024-12-02T21:07:27,667 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK], DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK]] 2024-12-02T21:07:29,671 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T21:07:29,673 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C41569%2C1733173594323.1733173649672 2024-12-02T21:07:32,821 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-02T21:07:34,688 INFO [Time-limited test {}] wal.AbstractFSWAL(1183): Slow sync cost: 5011 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK], DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK]] 2024-12-02T21:07:34,688 WARN [Time-limited test {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5011 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK], DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK]] 2024-12-02T21:07:34,688 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 7d4f3b9a7081%2C41569%2C1733173594323:(num 1733173649672) roll requested 2024-12-02T21:07:36,515 DEBUG [master/7d4f3b9a7081:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 53ecb1025a6e96f65e8ed6c8adf597f0 changed from -1.0 to 0.0, refreshing cache 2024-12-02T21:07:39,688 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK], DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK]] 2024-12-02T21:07:39,689 WARN [sync.2 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK], DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK]] 2024-12-02T21:07:39,690 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173638849 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173649672 2024-12-02T21:07:39,690 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46083:46083),(127.0.0.1/127.0.0.1:45619:45619)] 2024-12-02T21:07:39,690 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173638849 is not closed yet, will try archiving it next time 2024-12-02T21:07:39,692 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C41569%2C1733173594323.1733173659691 2024-12-02T21:07:39,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741843_1019 (size=4753) 2024-12-02T21:07:39,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741843_1019 (size=4753) 2024-12-02T21:07:44,715 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5020 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK], DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK]] 2024-12-02T21:07:44,715 WARN [sync.3 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5020 ms, 
threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK], DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK]] 2024-12-02T21:07:44,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41569 {}] regionserver.HRegion(8581): Flush requested on 6ab24299f8b9a2ccc778b34be8469b97 2024-12-02T21:07:44,716 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6ab24299f8b9a2ccc778b34be8469b97 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-02T21:07:44,725 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(1183): Slow sync cost: 5029 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK], DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK]] 2024-12-02T21:07:44,725 WARN [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5029 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK], DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK]] 2024-12-02T21:07:46,716 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T21:07:49,719 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5002 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK], DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK]] 2024-12-02T21:07:49,719 WARN [sync.4 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK], DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK]] 2024-12-02T21:07:49,726 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK], DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK]] 2024-12-02T21:07:49,726 WARN [sync.0 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK], DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK]] 2024-12-02T21:07:49,726 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173649672 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173659691 2024-12-02T21:07:49,727 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45619:45619),(127.0.0.1/127.0.0.1:46083:46083)] 2024-12-02T21:07:49,727 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(751): 
hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173649672 is not closed yet, will try archiving it next time 2024-12-02T21:07:49,727 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 7d4f3b9a7081%2C41569%2C1733173594323:(num 1733173659691) roll requested 2024-12-02T21:07:49,727 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C41569%2C1733173594323.1733173669727 2024-12-02T21:07:49,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741844_1020 (size=1569) 2024-12-02T21:07:49,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741844_1020 (size=1569) 2024-12-02T21:07:49,812 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/.tmp/info/d8b660e4661b4f72ad2811c8877897cf is 1080, key is row0015/info:/1733173636435/Put/seqid=0 2024-12-02T21:07:49,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741846_1022 (size=12509) 2024-12-02T21:07:49,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741846_1022 (size=12509) 2024-12-02T21:07:49,820 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/.tmp/info/d8b660e4661b4f72ad2811c8877897cf 2024-12-02T21:07:49,830 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/.tmp/info/d8b660e4661b4f72ad2811c8877897cf as hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/info/d8b660e4661b4f72ad2811c8877897cf 2024-12-02T21:07:49,838 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/info/d8b660e4661b4f72ad2811c8877897cf, entries=7, sequenceid=31, filesize=12.2 K 2024-12-02T21:07:54,848 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5009 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK], DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK]] 2024-12-02T21:07:54,849 WARN [sync.1 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5009 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK], DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK]] 
2024-12-02T21:07:54,849 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 6ab24299f8b9a2ccc778b34be8469b97 in 10133ms, sequenceid=31, compaction requested=true 2024-12-02T21:07:54,850 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6ab24299f8b9a2ccc778b34be8469b97: 2024-12-02T21:07:54,850 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=36.6 K, sizeToCheck=16.0 K 2024-12-02T21:07:54,851 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:07:54,851 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/info/c2078ecf498445139bc1de1aaad66427 because midkey is the same as first or last row 2024-12-02T21:07:54,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6ab24299f8b9a2ccc778b34be8469b97:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T21:07:54,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:07:54,855 DEBUG [RS:0;7d4f3b9a7081:41569-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T21:07:54,857 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(1183): Slow sync cost: 5048 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK], DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK]] 2024-12-02T21:07:54,857 WARN [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5048 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK], DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK]] 2024-12-02T21:07:54,859 DEBUG [RS:0;7d4f3b9a7081:41569-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T21:07:54,860 DEBUG [RS:0;7d4f3b9a7081:41569-shortCompactions-0 {}] regionserver.HStore(1540): 6ab24299f8b9a2ccc778b34be8469b97/info is initiating minor compaction (all files) 2024-12-02T21:07:54,860 INFO [RS:0;7d4f3b9a7081:41569-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6ab24299f8b9a2ccc778b34be8469b97/info in TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97. 
2024-12-02T21:07:54,861 INFO [RS:0;7d4f3b9a7081:41569-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/info/c2078ecf498445139bc1de1aaad66427, hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/info/95e71a88dce047778c6fb77c8f3926be, hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/info/d8b660e4661b4f72ad2811c8877897cf] into tmpdir=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/.tmp, totalSize=36.6 K 2024-12-02T21:07:54,862 DEBUG [RS:0;7d4f3b9a7081:41569-shortCompactions-0 {}] compactions.Compactor(224): Compacting c2078ecf498445139bc1de1aaad66427, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733173607326 2024-12-02T21:07:54,863 DEBUG [RS:0;7d4f3b9a7081:41569-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95e71a88dce047778c6fb77c8f3926be, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1733173621372 2024-12-02T21:07:54,863 DEBUG [RS:0;7d4f3b9a7081:41569-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8b660e4661b4f72ad2811c8877897cf, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1733173636435 2024-12-02T21:07:54,891 INFO [RS:0;7d4f3b9a7081:41569-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6ab24299f8b9a2ccc778b34be8469b97#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T21:07:54,892 DEBUG [RS:0;7d4f3b9a7081:41569-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/.tmp/info/fa93c1f4fb1d4bde800a85796fa1ac8e is 1080, key is row0001/info:/1733173607326/Put/seqid=0 2024-12-02T21:07:54,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741848_1024 (size=27710) 2024-12-02T21:07:54,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741848_1024 (size=27710) 2024-12-02T21:07:54,907 DEBUG [RS:0;7d4f3b9a7081:41569-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/.tmp/info/fa93c1f4fb1d4bde800a85796fa1ac8e as hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/info/fa93c1f4fb1d4bde800a85796fa1ac8e 2024-12-02T21:07:59,858 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK], DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK]] 2024-12-02T21:07:59,858 WARN [sync.2 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK], DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK]] 2024-12-02T21:07:59,859 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173659691 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173669727 2024-12-02T21:07:59,859 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46083:46083),(127.0.0.1/127.0.0.1:45619:45619)] 2024-12-02T21:07:59,860 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173659691 is not closed yet, will try archiving it next time 2024-12-02T21:07:59,860 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173627390 to hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/oldWALs/7d4f3b9a7081%2C41569%2C1733173594323.1733173627390 2024-12-02T21:07:59,860 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 7d4f3b9a7081%2C41569%2C1733173594323:(num 1733173679860) roll requested 
2024-12-02T21:07:59,861 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C41569%2C1733173594323.1733173679860 2024-12-02T21:07:59,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741845_1021 (size=438) 2024-12-02T21:07:59,918 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173638849 to hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/oldWALs/7d4f3b9a7081%2C41569%2C1733173594323.1733173638849 2024-12-02T21:07:59,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741845_1021 (size=438) 2024-12-02T21:07:59,922 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173649672 to hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/oldWALs/7d4f3b9a7081%2C41569%2C1733173594323.1733173649672 2024-12-02T21:07:59,924 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173659691 to hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/oldWALs/7d4f3b9a7081%2C41569%2C1733173594323.1733173659691 2024-12-02T21:08:02,821 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T21:08:04,889 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5029 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK], DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK]] 2024-12-02T21:08:04,889 WARN [sync.3 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5029 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK], DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK]] 2024-12-02T21:08:04,891 INFO [RS:0;7d4f3b9a7081:41569-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6ab24299f8b9a2ccc778b34be8469b97/info of 6ab24299f8b9a2ccc778b34be8469b97 into fa93c1f4fb1d4bde800a85796fa1ac8e(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 10sec to execute. 
2024-12-02T21:08:04,891 DEBUG [RS:0;7d4f3b9a7081:41569-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6ab24299f8b9a2ccc778b34be8469b97: 2024-12-02T21:08:04,891 INFO [RS:0;7d4f3b9a7081:41569-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97., storeName=6ab24299f8b9a2ccc778b34be8469b97/info, priority=13, startTime=1733173674854; duration=10sec 2024-12-02T21:08:04,892 DEBUG [RS:0;7d4f3b9a7081:41569-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=27.1 K, sizeToCheck=16.0 K 2024-12-02T21:08:04,892 DEBUG [RS:0;7d4f3b9a7081:41569-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:08:04,892 DEBUG [RS:0;7d4f3b9a7081:41569-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/info/fa93c1f4fb1d4bde800a85796fa1ac8e because midkey is the same as first or last row 2024-12-02T21:08:04,892 DEBUG [RS:0;7d4f3b9a7081:41569-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:08:04,892 DEBUG [RS:0;7d4f3b9a7081:41569-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6ab24299f8b9a2ccc778b34be8469b97:info 2024-12-02T21:08:04,927 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK], DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK]] 2024-12-02T21:08:04,927 WARN [sync.4 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33281,DS-f5629455-818a-43eb-b35c-bea4817169a9,DISK], DatanodeInfoWithStorage[127.0.0.1:39177,DS-791e1d83-1ef3-4bad-8ad0-a2e3cf66b8a0,DISK]] 2024-12-02T21:08:04,928 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173669727 with entries=1, filesize=531 B; new WAL /user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173679860 2024-12-02T21:08:04,928 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46083:46083),(127.0.0.1/127.0.0.1:45619:45619)] 2024-12-02T21:08:04,928 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173669727 is not closed yet, will try archiving it next time 2024-12-02T21:08:04,929 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C41569%2C1733173594323.1733173684929 2024-12-02T21:08:04,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741847_1023 (size=539) 
2024-12-02T21:08:04,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741847_1023 (size=539) 2024-12-02T21:08:04,934 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173669727 to hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/oldWALs/7d4f3b9a7081%2C41569%2C1733173594323.1733173669727 2024-12-02T21:08:04,941 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173679860 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173684929 2024-12-02T21:08:04,941 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45619:45619),(127.0.0.1/127.0.0.1:46083:46083)] 2024-12-02T21:08:04,941 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173679860 is not closed yet, will try archiving it next time 2024-12-02T21:08:04,941 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 7d4f3b9a7081%2C41569%2C1733173594323:(num 1733173684929) roll requested 2024-12-02T21:08:04,942 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C41569%2C1733173594323.1733173684941 2024-12-02T21:08:04,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741849_1025 (size=1258) 2024-12-02T21:08:04,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741849_1025 (size=1258) 2024-12-02T21:08:04,949 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173684929 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173684941 2024-12-02T21:08:04,949 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46083:46083),(127.0.0.1/127.0.0.1:45619:45619)] 2024-12-02T21:08:04,949 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173684929 is not closed yet, will try archiving it next time 2024-12-02T21:08:04,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741850_1026 (size=93) 2024-12-02T21:08:04,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added 
to blk_1073741850_1026 (size=93) 2024-12-02T21:08:04,952 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323/7d4f3b9a7081%2C41569%2C1733173594323.1733173684929 to hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/oldWALs/7d4f3b9a7081%2C41569%2C1733173594323.1733173684929 2024-12-02T21:08:07,678 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 6ab24299f8b9a2ccc778b34be8469b97, had cached 0 bytes from a total of 27710 2024-12-02T21:08:16,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41569 {}] regionserver.HRegion(8581): Flush requested on 6ab24299f8b9a2ccc778b34be8469b97 2024-12-02T21:08:16,967 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6ab24299f8b9a2ccc778b34be8469b97 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-02T21:08:16,978 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/.tmp/info/d8ebcc22a1fd448d9a20d701f82dc5a2 is 1080, key is row0022/info:/1733173684930/Put/seqid=0 2024-12-02T21:08:16,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741852_1028 (size=12509) 2024-12-02T21:08:16,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741852_1028 (size=12509) 2024-12-02T21:08:16,987 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/.tmp/info/d8ebcc22a1fd448d9a20d701f82dc5a2 2024-12-02T21:08:16,996 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/.tmp/info/d8ebcc22a1fd448d9a20d701f82dc5a2 as hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/info/d8ebcc22a1fd448d9a20d701f82dc5a2 2024-12-02T21:08:17,003 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/info/d8ebcc22a1fd448d9a20d701f82dc5a2, entries=7, sequenceid=42, filesize=12.2 K 2024-12-02T21:08:17,004 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 6ab24299f8b9a2ccc778b34be8469b97 in 37ms, sequenceid=42, compaction requested=false 2024-12-02T21:08:17,004 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6ab24299f8b9a2ccc778b34be8469b97: 2024-12-02T21:08:17,004 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=39.3 K, sizeToCheck=16.0 K 2024-12-02T21:08:17,004 DEBUG 
[MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:08:17,005 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/info/fa93c1f4fb1d4bde800a85796fa1ac8e because midkey is the same as first or last row 2024-12-02T21:08:24,990 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-02T21:08:24,991 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-02T21:08:24,992 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x744a1e4d to 127.0.0.1:58068 2024-12-02T21:08:24,992 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:08:24,994 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-02T21:08:24,994 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=171534581, stopped=false 2024-12-02T21:08:24,995 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=7d4f3b9a7081,39967,1733173593620 2024-12-02T21:08:25,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T21:08:25,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1019927e59a0001, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T21:08:25,038 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-02T21:08:25,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1019927e59a0001, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:08:25,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:08:25,039 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:08:25,039 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7d4f3b9a7081,41569,1733173594323' ***** 2024-12-02T21:08:25,039 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-02T21:08:25,039 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:08:25,039 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41569-0x1019927e59a0001, quorum=127.0.0.1:58068, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:08:25,039 INFO [RS:0;7d4f3b9a7081:41569 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T21:08:25,040 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-02T21:08:25,040 INFO [RS:0;7d4f3b9a7081:41569 {}] 
flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T21:08:25,040 INFO [RS:0;7d4f3b9a7081:41569 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T21:08:25,040 INFO [RS:0;7d4f3b9a7081:41569 {}] regionserver.HRegionServer(3579): Received CLOSE for 53ecb1025a6e96f65e8ed6c8adf597f0 2024-12-02T21:08:25,040 INFO [RS:0;7d4f3b9a7081:41569 {}] regionserver.HRegionServer(3579): Received CLOSE for 6ab24299f8b9a2ccc778b34be8469b97 2024-12-02T21:08:25,040 INFO [RS:0;7d4f3b9a7081:41569 {}] regionserver.HRegionServer(1224): stopping server 7d4f3b9a7081,41569,1733173594323 2024-12-02T21:08:25,040 DEBUG [RS:0;7d4f3b9a7081:41569 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:08:25,041 INFO [RS:0;7d4f3b9a7081:41569 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T21:08:25,041 INFO [RS:0;7d4f3b9a7081:41569 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T21:08:25,041 INFO [RS:0;7d4f3b9a7081:41569 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-02T21:08:25,041 INFO [RS:0;7d4f3b9a7081:41569 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-02T21:08:25,041 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 53ecb1025a6e96f65e8ed6c8adf597f0, disabling compactions & flushes 2024-12-02T21:08:25,041 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733173596441.53ecb1025a6e96f65e8ed6c8adf597f0. 2024-12-02T21:08:25,041 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733173596441.53ecb1025a6e96f65e8ed6c8adf597f0. 2024-12-02T21:08:25,041 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733173596441.53ecb1025a6e96f65e8ed6c8adf597f0. after waiting 0 ms 2024-12-02T21:08:25,041 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733173596441.53ecb1025a6e96f65e8ed6c8adf597f0. 
2024-12-02T21:08:25,041 INFO [RS:0;7d4f3b9a7081:41569 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close 2024-12-02T21:08:25,041 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 53ecb1025a6e96f65e8ed6c8adf597f0 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-02T21:08:25,041 DEBUG [RS:0;7d4f3b9a7081:41569 {}] regionserver.HRegionServer(1603): Online Regions={53ecb1025a6e96f65e8ed6c8adf597f0=hbase:namespace,,1733173596441.53ecb1025a6e96f65e8ed6c8adf597f0., 6ab24299f8b9a2ccc778b34be8469b97=TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97., 1588230740=hbase:meta,,1.1588230740} 2024-12-02T21:08:25,041 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-02T21:08:25,041 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-02T21:08:25,041 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-02T21:08:25,041 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T21:08:25,041 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T21:08:25,042 DEBUG [RS:0;7d4f3b9a7081:41569 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 53ecb1025a6e96f65e8ed6c8adf597f0, 6ab24299f8b9a2ccc778b34be8469b97 2024-12-02T21:08:25,042 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.81 KB heapSize=5.32 KB 2024-12-02T21:08:25,056 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/hbase/namespace/53ecb1025a6e96f65e8ed6c8adf597f0/.tmp/info/ec1c35fe08a948e2a808fcb857cfe583 is 45, key is default/info:d/1733173596997/Put/seqid=0 2024-12-02T21:08:25,060 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/hbase/meta/1588230740/.tmp/info/6b54498a306141a1ba1eb558c3051d6d is 195, key is TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97./info:regioninfo/1733173597698/Put/seqid=0 2024-12-02T21:08:25,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741853_1029 (size=5037) 2024-12-02T21:08:25,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741853_1029 (size=5037) 2024-12-02T21:08:25,062 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/hbase/namespace/53ecb1025a6e96f65e8ed6c8adf597f0/.tmp/info/ec1c35fe08a948e2a808fcb857cfe583 2024-12-02T21:08:25,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741854_1030 (size=8172) 2024-12-02T21:08:25,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741854_1030 (size=8172) 2024-12-02T21:08:25,067 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.59 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/hbase/meta/1588230740/.tmp/info/6b54498a306141a1ba1eb558c3051d6d 2024-12-02T21:08:25,072 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/hbase/namespace/53ecb1025a6e96f65e8ed6c8adf597f0/.tmp/info/ec1c35fe08a948e2a808fcb857cfe583 as hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/hbase/namespace/53ecb1025a6e96f65e8ed6c8adf597f0/info/ec1c35fe08a948e2a808fcb857cfe583 2024-12-02T21:08:25,079 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/hbase/namespace/53ecb1025a6e96f65e8ed6c8adf597f0/info/ec1c35fe08a948e2a808fcb857cfe583, entries=2, sequenceid=6, filesize=4.9 K 2024-12-02T21:08:25,080 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 53ecb1025a6e96f65e8ed6c8adf597f0 in 39ms, sequenceid=6, compaction requested=false 2024-12-02T21:08:25,086 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/hbase/namespace/53ecb1025a6e96f65e8ed6c8adf597f0/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T21:08:25,088 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733173596441.53ecb1025a6e96f65e8ed6c8adf597f0. 2024-12-02T21:08:25,088 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 53ecb1025a6e96f65e8ed6c8adf597f0: 2024-12-02T21:08:25,089 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733173596441.53ecb1025a6e96f65e8ed6c8adf597f0. 2024-12-02T21:08:25,089 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 6ab24299f8b9a2ccc778b34be8469b97, disabling compactions & flushes 2024-12-02T21:08:25,089 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97. 
2024-12-02T21:08:25,089 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97. 2024-12-02T21:08:25,089 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97. after waiting 0 ms 2024-12-02T21:08:25,089 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97. 2024-12-02T21:08:25,089 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 6ab24299f8b9a2ccc778b34be8469b97 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-12-02T21:08:25,090 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/hbase/meta/1588230740/.tmp/table/48e8083fd4c4406c86837834252b48a5 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1733173597709/Put/seqid=0 2024-12-02T21:08:25,095 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/.tmp/info/4ab8f301382e4f52829c4eb68d97e524 is 1080, key is row0029/info:/1733173698970/Put/seqid=0 2024-12-02T21:08:25,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741855_1031 (size=5452) 2024-12-02T21:08:25,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741855_1031 (size=5452) 2024-12-02T21:08:25,097 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=232 B at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/hbase/meta/1588230740/.tmp/table/48e8083fd4c4406c86837834252b48a5 2024-12-02T21:08:25,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741856_1032 (size=8193) 2024-12-02T21:08:25,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741856_1032 (size=8193) 2024-12-02T21:08:25,102 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/.tmp/info/4ab8f301382e4f52829c4eb68d97e524 2024-12-02T21:08:25,105 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/hbase/meta/1588230740/.tmp/info/6b54498a306141a1ba1eb558c3051d6d as hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/hbase/meta/1588230740/info/6b54498a306141a1ba1eb558c3051d6d 2024-12-02T21:08:25,109 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/.tmp/info/4ab8f301382e4f52829c4eb68d97e524 as hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/info/4ab8f301382e4f52829c4eb68d97e524 2024-12-02T21:08:25,114 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/hbase/meta/1588230740/info/6b54498a306141a1ba1eb558c3051d6d, entries=20, sequenceid=14, filesize=8.0 K 2024-12-02T21:08:25,115 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/hbase/meta/1588230740/.tmp/table/48e8083fd4c4406c86837834252b48a5 as hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/hbase/meta/1588230740/table/48e8083fd4c4406c86837834252b48a5 2024-12-02T21:08:25,118 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/info/4ab8f301382e4f52829c4eb68d97e524, entries=3, sequenceid=48, filesize=8.0 K 2024-12-02T21:08:25,119 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 6ab24299f8b9a2ccc778b34be8469b97 in 30ms, sequenceid=48, compaction requested=true 2024-12-02T21:08:25,120 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/info/c2078ecf498445139bc1de1aaad66427, hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/info/95e71a88dce047778c6fb77c8f3926be, hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/info/d8b660e4661b4f72ad2811c8877897cf] to archive 2024-12-02T21:08:25,122 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/hbase/meta/1588230740/table/48e8083fd4c4406c86837834252b48a5, entries=4, sequenceid=14, filesize=5.3 K 
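The flush entries above follow a two-step pattern: each HFile is first written under the region's .tmp directory, then committed by renaming it into the column-family directory (the "Committing ... as ..." lines). A minimal illustrative sketch of that pattern against the Hadoop FileSystem API, with hypothetical paths and class name:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpCommitSketch {
        public static void main(String[] args) throws IOException {
            FileSystem fs = FileSystem.get(new Configuration());
            // Hypothetical flush output, written under the region's .tmp directory first.
            Path tmp = new Path("/data/default/SomeTable/region/.tmp/info/abc123");
            // The commit is a rename into the column-family directory, so readers
            // only ever see complete files.
            Path committed = new Path("/data/default/SomeTable/region/info/abc123");
            if (!fs.rename(tmp, committed)) {
                throw new IOException("commit failed for " + tmp);
            }
        }
    }

The same rename-based move shows up later in the log when compacted store files are relocated to the archive directory and closed WALs to oldWALs.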
2024-12-02T21:08:25,123 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-02T21:08:25,124 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~2.81 KB/2882, heapSize ~5.04 KB/5160, currentSize=0 B/0 for 1588230740 in 83ms, sequenceid=14, compaction requested=false 2024-12-02T21:08:25,126 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/info/c2078ecf498445139bc1de1aaad66427 to hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/archive/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/info/c2078ecf498445139bc1de1aaad66427 2024-12-02T21:08:25,127 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/info/95e71a88dce047778c6fb77c8f3926be to hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/archive/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/info/95e71a88dce047778c6fb77c8f3926be 2024-12-02T21:08:25,129 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/hbase/meta/1588230740/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=1 2024-12-02T21:08:25,129 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/info/d8b660e4661b4f72ad2811c8877897cf to hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/archive/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/info/d8b660e4661b4f72ad2811c8877897cf 2024-12-02T21:08:25,129 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T21:08:25,130 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-02T21:08:25,130 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-02T21:08:25,130 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-02T21:08:25,144 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/data/default/TestLogRolling-testSlowSyncLogRolling/6ab24299f8b9a2ccc778b34be8469b97/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-12-02T21:08:25,144 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97. 2024-12-02T21:08:25,145 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 6ab24299f8b9a2ccc778b34be8469b97: 2024-12-02T21:08:25,145 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1733173597282.6ab24299f8b9a2ccc778b34be8469b97. 2024-12-02T21:08:25,242 INFO [RS:0;7d4f3b9a7081:41569 {}] regionserver.HRegionServer(1250): stopping server 7d4f3b9a7081,41569,1733173594323; all regions closed. 2024-12-02T21:08:25,244 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323 2024-12-02T21:08:25,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741834_1010 (size=4330) 2024-12-02T21:08:25,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741834_1010 (size=4330) 2024-12-02T21:08:25,253 DEBUG [RS:0;7d4f3b9a7081:41569 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/oldWALs 2024-12-02T21:08:25,253 INFO [RS:0;7d4f3b9a7081:41569 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 7d4f3b9a7081%2C41569%2C1733173594323.meta:.meta(num 1733173596107) 2024-12-02T21:08:25,254 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/WALs/7d4f3b9a7081,41569,1733173594323 2024-12-02T21:08:25,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741851_1027 (size=13066) 2024-12-02T21:08:25,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741851_1027 (size=13066) 2024-12-02T21:08:25,262 DEBUG [RS:0;7d4f3b9a7081:41569 {}] wal.AbstractFSWAL(1071): Moved 3 WAL file(s) to /user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/oldWALs 2024-12-02T21:08:25,262 INFO [RS:0;7d4f3b9a7081:41569 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 7d4f3b9a7081%2C41569%2C1733173594323:(num 1733173684941) 2024-12-02T21:08:25,263 DEBUG [RS:0;7d4f3b9a7081:41569 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:08:25,263 INFO [RS:0;7d4f3b9a7081:41569 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T21:08:25,263 INFO [RS:0;7d4f3b9a7081:41569 {}] hbase.ChoreService(370): Chore service for: regionserver/7d4f3b9a7081:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-02T21:08:25,263 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-02T21:08:25,263 INFO [RS:0;7d4f3b9a7081:41569 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:41569 2024-12-02T21:08:25,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T21:08:25,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1019927e59a0001, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7d4f3b9a7081,41569,1733173594323 2024-12-02T21:08:25,280 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7d4f3b9a7081,41569,1733173594323] 2024-12-02T21:08:25,280 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 7d4f3b9a7081,41569,1733173594323; numProcessing=1 2024-12-02T21:08:25,288 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/7d4f3b9a7081,41569,1733173594323 already deleted, retry=false 2024-12-02T21:08:25,288 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 7d4f3b9a7081,41569,1733173594323 expired; onlineServers=0 2024-12-02T21:08:25,288 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7d4f3b9a7081,39967,1733173593620' ***** 2024-12-02T21:08:25,288 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-02T21:08:25,289 DEBUG [M:0;7d4f3b9a7081:39967 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5dc960f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7d4f3b9a7081/172.17.0.2:0 2024-12-02T21:08:25,289 INFO [M:0;7d4f3b9a7081:39967 {}] regionserver.HRegionServer(1224): stopping server 7d4f3b9a7081,39967,1733173593620 2024-12-02T21:08:25,289 INFO [M:0;7d4f3b9a7081:39967 {}] regionserver.HRegionServer(1250): stopping server 7d4f3b9a7081,39967,1733173593620; all regions closed. 2024-12-02T21:08:25,289 DEBUG [M:0;7d4f3b9a7081:39967 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:08:25,289 DEBUG [M:0;7d4f3b9a7081:39967 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-02T21:08:25,289 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
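The ZooKeeper events above show the stopping region server's ephemeral znode under /hbase/rs disappearing, which is what the master's RegionServerTracker reacts to before processing the expiration. A small illustrative sketch of watching such a znode with the plain ZooKeeper client; the quorum address, session timeout, and class name are assumptions, not values from this run:

    import java.util.List;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RsTrackerSketch implements Watcher {
        private ZooKeeper zk;

        @Override
        public void process(WatchedEvent event) {
            // NodeChildrenChanged fires when an ephemeral child (a server znode)
            // appears or disappears under the watched path.
            if (event.getType() == Event.EventType.NodeChildrenChanged) {
                System.out.println("children of " + event.getPath() + " changed; re-list and diff");
            }
        }

        public List<String> watchServers() throws Exception {
            zk = new ZooKeeper("127.0.0.1:2181", 30000, this); // hypothetical quorum
            // getChildren registers a one-time watch; a real tracker re-registers
            // after every event and diffs the returned list against its own state.
            return zk.getChildren("/hbase/rs", this);
        }
    }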
2024-12-02T21:08:25,289 DEBUG [M:0;7d4f3b9a7081:39967 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-02T21:08:25,289 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.small.0-1733173595212 {}] cleaner.HFileCleaner(306): Exit Thread[master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.small.0-1733173595212,5,FailOnTimeoutGroup] 2024-12-02T21:08:25,289 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.large.0-1733173595212 {}] cleaner.HFileCleaner(306): Exit Thread[master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.large.0-1733173595212,5,FailOnTimeoutGroup] 2024-12-02T21:08:25,290 INFO [M:0;7d4f3b9a7081:39967 {}] hbase.ChoreService(370): Chore service for: master/7d4f3b9a7081:0 had [] on shutdown 2024-12-02T21:08:25,290 DEBUG [M:0;7d4f3b9a7081:39967 {}] master.HMaster(1733): Stopping service threads 2024-12-02T21:08:25,290 INFO [M:0;7d4f3b9a7081:39967 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-02T21:08:25,291 INFO [M:0;7d4f3b9a7081:39967 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-02T21:08:25,291 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-02T21:08:25,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-02T21:08:25,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:08:25,297 DEBUG [M:0;7d4f3b9a7081:39967 {}] zookeeper.ZKUtil(347): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-02T21:08:25,297 WARN [M:0;7d4f3b9a7081:39967 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-02T21:08:25,297 INFO [M:0;7d4f3b9a7081:39967 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-02T21:08:25,297 INFO [M:0;7d4f3b9a7081:39967 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-02T21:08:25,298 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T21:08:25,298 DEBUG [M:0;7d4f3b9a7081:39967 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T21:08:25,298 INFO [M:0;7d4f3b9a7081:39967 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:08:25,298 DEBUG [M:0;7d4f3b9a7081:39967 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:08:25,298 DEBUG [M:0;7d4f3b9a7081:39967 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-02T21:08:25,298 DEBUG [M:0;7d4f3b9a7081:39967 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:08:25,299 INFO [M:0;7d4f3b9a7081:39967 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=40.20 KB heapSize=50.12 KB 2024-12-02T21:08:25,320 DEBUG [M:0;7d4f3b9a7081:39967 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7f236c0c2d5c41d7af0a3ce3feaf8abe is 82, key is hbase:meta,,1/info:regioninfo/1733173596210/Put/seqid=0 2024-12-02T21:08:25,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741857_1033 (size=5672) 2024-12-02T21:08:25,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741857_1033 (size=5672) 2024-12-02T21:08:25,326 INFO [M:0;7d4f3b9a7081:39967 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7f236c0c2d5c41d7af0a3ce3feaf8abe 2024-12-02T21:08:25,348 DEBUG [M:0;7d4f3b9a7081:39967 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5e04e7698c6946f9ba42e31a4933f5a8 is 765, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1733173597757/Put/seqid=0 2024-12-02T21:08:25,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741858_1034 (size=6425) 2024-12-02T21:08:25,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741858_1034 (size=6425) 2024-12-02T21:08:25,355 INFO [M:0;7d4f3b9a7081:39967 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=39.59 KB at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5e04e7698c6946f9ba42e31a4933f5a8 2024-12-02T21:08:25,355 INFO [regionserver/7d4f3b9a7081:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T21:08:25,362 INFO [M:0;7d4f3b9a7081:39967 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5e04e7698c6946f9ba42e31a4933f5a8 2024-12-02T21:08:25,376 DEBUG [M:0;7d4f3b9a7081:39967 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7b027ea7cc7b439d9bbc64557779c146 is 69, key is 7d4f3b9a7081,41569,1733173594323/rs:state/1733173595284/Put/seqid=0 2024-12-02T21:08:25,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1019927e59a0001, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:08:25,380 INFO [RS:0;7d4f3b9a7081:41569 {}] regionserver.HRegionServer(1307): 
Exiting; stopping=7d4f3b9a7081,41569,1733173594323; zookeeper connection closed. 2024-12-02T21:08:25,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x1019927e59a0001, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:08:25,380 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3ece9c9a {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3ece9c9a 2024-12-02T21:08:25,381 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-02T21:08:25,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741859_1035 (size=5156) 2024-12-02T21:08:25,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741859_1035 (size=5156) 2024-12-02T21:08:25,382 INFO [M:0;7d4f3b9a7081:39967 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7b027ea7cc7b439d9bbc64557779c146 2024-12-02T21:08:25,402 DEBUG [M:0;7d4f3b9a7081:39967 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/95ca97aa3d234dfab13b06a472cc591c is 52, key is load_balancer_on/state:d/1733173597266/Put/seqid=0 2024-12-02T21:08:25,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741860_1036 (size=5056) 2024-12-02T21:08:25,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741860_1036 (size=5056) 2024-12-02T21:08:25,408 INFO [M:0;7d4f3b9a7081:39967 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/95ca97aa3d234dfab13b06a472cc591c 2024-12-02T21:08:25,415 DEBUG [M:0;7d4f3b9a7081:39967 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7f236c0c2d5c41d7af0a3ce3feaf8abe as hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7f236c0c2d5c41d7af0a3ce3feaf8abe 2024-12-02T21:08:25,422 INFO [M:0;7d4f3b9a7081:39967 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7f236c0c2d5c41d7af0a3ce3feaf8abe, entries=8, sequenceid=104, filesize=5.5 K 2024-12-02T21:08:25,423 DEBUG [M:0;7d4f3b9a7081:39967 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5e04e7698c6946f9ba42e31a4933f5a8 as 
hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5e04e7698c6946f9ba42e31a4933f5a8 2024-12-02T21:08:25,429 INFO [M:0;7d4f3b9a7081:39967 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5e04e7698c6946f9ba42e31a4933f5a8 2024-12-02T21:08:25,430 INFO [M:0;7d4f3b9a7081:39967 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5e04e7698c6946f9ba42e31a4933f5a8, entries=11, sequenceid=104, filesize=6.3 K 2024-12-02T21:08:25,431 DEBUG [M:0;7d4f3b9a7081:39967 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7b027ea7cc7b439d9bbc64557779c146 as hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7b027ea7cc7b439d9bbc64557779c146 2024-12-02T21:08:25,437 INFO [M:0;7d4f3b9a7081:39967 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7b027ea7cc7b439d9bbc64557779c146, entries=1, sequenceid=104, filesize=5.0 K 2024-12-02T21:08:25,439 DEBUG [M:0;7d4f3b9a7081:39967 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/95ca97aa3d234dfab13b06a472cc591c as hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/95ca97aa3d234dfab13b06a472cc591c 2024-12-02T21:08:25,445 INFO [M:0;7d4f3b9a7081:39967 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/95ca97aa3d234dfab13b06a472cc591c, entries=1, sequenceid=104, filesize=4.9 K 2024-12-02T21:08:25,447 INFO [M:0;7d4f3b9a7081:39967 {}] regionserver.HRegion(3040): Finished flush of dataSize ~40.20 KB/41161, heapSize ~50.05 KB/51256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 149ms, sequenceid=104, compaction requested=false 2024-12-02T21:08:25,449 INFO [M:0;7d4f3b9a7081:39967 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:08:25,449 DEBUG [M:0;7d4f3b9a7081:39967 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-02T21:08:25,449 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/MasterData/WALs/7d4f3b9a7081,39967,1733173593620 2024-12-02T21:08:25,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33281 is added to blk_1073741830_1006 (size=48462) 2024-12-02T21:08:25,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39177 is added to blk_1073741830_1006 (size=48462) 2024-12-02T21:08:25,452 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-02T21:08:25,452 INFO [M:0;7d4f3b9a7081:39967 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-02T21:08:25,452 INFO [M:0;7d4f3b9a7081:39967 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:39967 2024-12-02T21:08:25,461 DEBUG [M:0;7d4f3b9a7081:39967 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/7d4f3b9a7081,39967,1733173593620 already deleted, retry=false 2024-12-02T21:08:25,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:08:25,572 INFO [M:0;7d4f3b9a7081:39967 {}] regionserver.HRegionServer(1307): Exiting; stopping=7d4f3b9a7081,39967,1733173593620; zookeeper connection closed. 2024-12-02T21:08:25,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39967-0x1019927e59a0000, quorum=127.0.0.1:58068, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:08:25,583 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@63d8281c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:08:25,585 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@55b489f0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:08:25,586 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:08:25,586 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49fab02b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:08:25,586 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2170f3b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/hadoop.log.dir/,STOPPED} 2024-12-02T21:08:25,588 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
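The "already deleted, retry=false" entry above is a delete that tolerates a missing znode. An illustrative sketch of that idiom with the ZooKeeper client; the helper name is hypothetical:

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooKeeper;

    public class DeleteQuietlySketch {
        // Delete a znode, treating "it is already gone" as success.
        static void deleteQuietly(ZooKeeper zk, String path)
                throws InterruptedException, KeeperException {
            try {
                zk.delete(path, -1); // -1 means "any version"
            } catch (KeeperException.NoNodeException e) {
                // Never created, or already removed by someone else; nothing to do.
            }
        }
    }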
2024-12-02T21:08:25,588 WARN [BP-430769306-172.17.0.2-1733173590309 heartbeating to localhost/127.0.0.1:39905 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:08:25,588 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:08:25,588 WARN [BP-430769306-172.17.0.2-1733173590309 heartbeating to localhost/127.0.0.1:39905 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-430769306-172.17.0.2-1733173590309 (Datanode Uuid ddd49cb0-d098-4313-b5db-0c3aca344988) service to localhost/127.0.0.1:39905 2024-12-02T21:08:25,590 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/cluster_67c3d1bd-51e2-48dc-a4c9-8f1d1435c878/dfs/data/data3/current/BP-430769306-172.17.0.2-1733173590309 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:08:25,590 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/cluster_67c3d1bd-51e2-48dc-a4c9-8f1d1435c878/dfs/data/data4/current/BP-430769306-172.17.0.2-1733173590309 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:08:25,591 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:08:25,592 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@9f8c8e3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:08:25,593 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@49793231{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:08:25,593 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:08:25,593 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c2208{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:08:25,593 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4239ce1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/hadoop.log.dir/,STOPPED} 2024-12-02T21:08:25,594 WARN [BP-430769306-172.17.0.2-1733173590309 heartbeating to localhost/127.0.0.1:39905 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:08:25,594 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
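The interrupt warnings above come from background threads (heartbeat senders, command processors, disk-usage refreshers) that use thread interruption as their shutdown signal. A generic illustrative sketch of that pattern in plain Java, not the Hadoop implementation:

    public class RefreshLoopSketch implements Runnable {
        @Override
        public void run() {
            // Loop until interrupted; an interrupt during sleep is the normal
            // way the owner asks this thread to stop.
            while (!Thread.currentThread().isInterrupted()) {
                try {
                    refresh();
                    Thread.sleep(10_000L);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt(); // restore the flag, then exit
                    break;
                }
            }
        }

        private void refresh() {
            // Placeholder for the periodic work, e.g. recomputing used disk space.
        }
    }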
2024-12-02T21:08:25,594 WARN [BP-430769306-172.17.0.2-1733173590309 heartbeating to localhost/127.0.0.1:39905 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-430769306-172.17.0.2-1733173590309 (Datanode Uuid 50ad25c6-72f2-4a25-862d-e81d42b49b23) service to localhost/127.0.0.1:39905 2024-12-02T21:08:25,594 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:08:25,595 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/cluster_67c3d1bd-51e2-48dc-a4c9-8f1d1435c878/dfs/data/data1/current/BP-430769306-172.17.0.2-1733173590309 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:08:25,595 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/cluster_67c3d1bd-51e2-48dc-a4c9-8f1d1435c878/dfs/data/data2/current/BP-430769306-172.17.0.2-1733173590309 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:08:25,596 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:08:25,604 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4cd532bc{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T21:08:25,605 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@619b4309{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:08:25,605 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:08:25,605 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c7fe1c5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:08:25,605 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@37896107{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/hadoop.log.dir/,STOPPED} 2024-12-02T21:08:25,614 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-02T21:08:25,647 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-02T21:08:25,653 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=64 (was 12) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-5-2 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/7d4f3b9a7081:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/7d4f3b9a7081:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Client (356647748) connection to localhost/127.0.0.1:39905 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) 
app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/7d4f3b9a7081:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RS-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39905 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: IPC Client (356647748) connection to localhost/127.0.0.1:39905 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39905 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:39905 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39905 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: region-location-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39905 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@1e7a9f39 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (356647748) connection to localhost/127.0.0.1:39905 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) - Thread LEAK? -, OpenFileDescriptor=406 (was 286) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=63 (was 294), ProcessCount=11 (was 11), AvailableMemoryMB=7656 (was 8198) 2024-12-02T21:08:25,658 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=65, OpenFileDescriptor=406, MaxFileDescriptor=1048576, SystemLoadAverage=63, ProcessCount=11, AvailableMemoryMB=7656 2024-12-02T21:08:25,658 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-02T21:08:25,659 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/hadoop.log.dir so I do NOT create it in target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e 2024-12-02T21:08:25,659 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/380baec2-f3d4-8625-7ce2-4399a87d15dd/hadoop.tmp.dir so I do NOT create it in target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e 2024-12-02T21:08:25,659 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f, deleteOnExit=true 2024-12-02T21:08:25,659 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-02T21:08:25,659 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/test.cache.data in system properties and HBase conf 2024-12-02T21:08:25,659 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/hadoop.tmp.dir in system properties and HBase conf 2024-12-02T21:08:25,659 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/hadoop.log.dir in system properties and HBase conf 2024-12-02T21:08:25,659 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-02T21:08:25,659 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-02T21:08:25,659 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-02T21:08:25,659 DEBUG [Time-limited test {}] 
fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-02T21:08:25,660 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-02T21:08:25,660 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-02T21:08:25,660 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-02T21:08:25,660 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T21:08:25,660 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-02T21:08:25,660 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-02T21:08:25,660 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T21:08:25,660 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T21:08:25,660 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-02T21:08:25,660 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/nfs.dump.dir in system properties and HBase conf 2024-12-02T21:08:25,660 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir 
to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/java.io.tmpdir in system properties and HBase conf 2024-12-02T21:08:25,660 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T21:08:25,660 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-02T21:08:25,661 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-02T21:08:25,673 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T21:08:25,930 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:08:25,934 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:08:25,935 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:08:25,936 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:08:25,936 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T21:08:25,936 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:08:25,937 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@58ba2427{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:08:25,937 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1d621144{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:08:26,029 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@643c44f2{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/java.io.tmpdir/jetty-localhost-45531-hadoop-hdfs-3_4_1-tests_jar-_-any-8227280815220687507/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T21:08:26,029 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@72ab5857{HTTP/1.1, (http/1.1)}{localhost:45531} 2024-12-02T21:08:26,030 INFO [Time-limited test {}] server.Server(415): Started @117409ms 2024-12-02T21:08:26,040 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T21:08:26,195 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:08:26,198 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:08:26,198 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:08:26,198 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:08:26,198 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T21:08:26,199 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b019b40{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:08:26,199 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3efea70d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:08:26,287 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2f925d25{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/java.io.tmpdir/jetty-localhost-36571-hadoop-hdfs-3_4_1-tests_jar-_-any-18349608163279806081/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:08:26,287 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1c800c5d{HTTP/1.1, (http/1.1)}{localhost:36571} 2024-12-02T21:08:26,288 INFO [Time-limited test {}] server.Server(415): Started @117667ms 2024-12-02T21:08:26,289 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:08:26,321 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:08:26,325 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:08:26,326 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:08:26,326 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:08:26,326 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T21:08:26,327 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59c97a81{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:08:26,327 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@108b35f9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:08:26,417 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@ad49bc2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/java.io.tmpdir/jetty-localhost-42931-hadoop-hdfs-3_4_1-tests_jar-_-any-14460439242829603729/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:08:26,418 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3a81d4c0{HTTP/1.1, (http/1.1)}{localhost:42931} 2024-12-02T21:08:26,418 INFO [Time-limited test {}] server.Server(415): Started @117798ms 2024-12-02T21:08:26,419 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:08:26,879 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data1/current/BP-967350915-172.17.0.2-1733173705684/current, will proceed with Du for space computation calculation, 2024-12-02T21:08:26,879 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data2/current/BP-967350915-172.17.0.2-1733173705684/current, will proceed with Du for space computation calculation, 2024-12-02T21:08:26,901 WARN [Thread-429 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T21:08:26,904 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2cd66ccbbe4048c4 with lease ID 0x89dd5ecfbf8accea: Processing first storage report for DS-86240d69-8ed7-4081-b6bc-0889c709e6c1 from datanode DatanodeRegistration(127.0.0.1:42239, datanodeUuid=e5e7676b-015b-44d3-a217-81e86e95d876, infoPort=39689, infoSecurePort=0, ipcPort=39531, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684) 2024-12-02T21:08:26,904 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2cd66ccbbe4048c4 with lease ID 0x89dd5ecfbf8accea: from storage DS-86240d69-8ed7-4081-b6bc-0889c709e6c1 node DatanodeRegistration(127.0.0.1:42239, datanodeUuid=e5e7676b-015b-44d3-a217-81e86e95d876, infoPort=39689, infoSecurePort=0, ipcPort=39531, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:08:26,904 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2cd66ccbbe4048c4 with lease ID 0x89dd5ecfbf8accea: Processing first storage report for DS-39cd48f2-635f-4b56-868a-fd1dacfdcff7 from datanode DatanodeRegistration(127.0.0.1:42239, datanodeUuid=e5e7676b-015b-44d3-a217-81e86e95d876, infoPort=39689, infoSecurePort=0, ipcPort=39531, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684) 2024-12-02T21:08:26,904 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2cd66ccbbe4048c4 with lease ID 0x89dd5ecfbf8accea: from storage DS-39cd48f2-635f-4b56-868a-fd1dacfdcff7 node DatanodeRegistration(127.0.0.1:42239, datanodeUuid=e5e7676b-015b-44d3-a217-81e86e95d876, infoPort=39689, infoSecurePort=0, ipcPort=39531, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:08:27,058 WARN [Thread-477 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data4/current/BP-967350915-172.17.0.2-1733173705684/current, will proceed with Du for space computation calculation, 2024-12-02T21:08:27,058 WARN [Thread-476 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data3/current/BP-967350915-172.17.0.2-1733173705684/current, will proceed with Du for space computation calculation, 2024-12-02T21:08:27,072 WARN [Thread-452 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T21:08:27,074 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x268f7ca9ea2300a with lease ID 0x89dd5ecfbf8acceb: Processing first storage report for DS-7999ded0-6f63-4ce0-8abb-657329e12700 from datanode DatanodeRegistration(127.0.0.1:44475, datanodeUuid=e2a33afa-21be-4db5-98c1-a8e5ef378ea1, infoPort=36911, infoSecurePort=0, ipcPort=38947, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684) 2024-12-02T21:08:27,074 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x268f7ca9ea2300a with lease ID 0x89dd5ecfbf8acceb: from storage DS-7999ded0-6f63-4ce0-8abb-657329e12700 node DatanodeRegistration(127.0.0.1:44475, datanodeUuid=e2a33afa-21be-4db5-98c1-a8e5ef378ea1, infoPort=36911, infoSecurePort=0, ipcPort=38947, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:08:27,074 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x268f7ca9ea2300a with lease ID 0x89dd5ecfbf8acceb: Processing first storage report for DS-7884db6b-68f9-446a-bf88-5889e749b504 from datanode DatanodeRegistration(127.0.0.1:44475, datanodeUuid=e2a33afa-21be-4db5-98c1-a8e5ef378ea1, infoPort=36911, infoSecurePort=0, ipcPort=38947, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684) 2024-12-02T21:08:27,075 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x268f7ca9ea2300a with lease ID 0x89dd5ecfbf8acceb: from storage DS-7884db6b-68f9-446a-bf88-5889e749b504 node DatanodeRegistration(127.0.0.1:44475, datanodeUuid=e2a33afa-21be-4db5-98c1-a8e5ef378ea1, infoPort=36911, infoSecurePort=0, ipcPort=38947, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:08:27,158 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e 2024-12-02T21:08:27,162 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/zookeeper_0, clientPort=58708, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-02T21:08:27,163 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=58708 2024-12-02T21:08:27,163 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:08:27,164 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:08:27,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42239 is added to blk_1073741825_1001 (size=7) 2024-12-02T21:08:27,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44475 is added to blk_1073741825_1001 (size=7) 2024-12-02T21:08:27,176 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7 with version=8 2024-12-02T21:08:27,176 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/hbase-staging 2024-12-02T21:08:27,178 INFO [Time-limited test {}] client.ConnectionUtils(129): master/7d4f3b9a7081:0 server-side Connection retries=45 2024-12-02T21:08:27,178 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:08:27,178 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T21:08:27,178 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T21:08:27,178 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:08:27,178 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T21:08:27,178 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T21:08:27,178 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T21:08:27,179 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:36403 2024-12-02T21:08:27,179 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:08:27,181 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:08:27,184 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:36403 connecting to ZooKeeper ensemble=127.0.0.1:58708 2024-12-02T21:08:27,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:364030x0, 
quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T21:08:27,242 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36403-0x1019929a40e0000 connected 2024-12-02T21:08:27,305 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T21:08:27,306 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:08:27,307 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T21:08:27,307 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36403 2024-12-02T21:08:27,307 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36403 2024-12-02T21:08:27,307 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36403 2024-12-02T21:08:27,308 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36403 2024-12-02T21:08:27,308 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36403 2024-12-02T21:08:27,308 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7, hbase.cluster.distributed=false 2024-12-02T21:08:27,324 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/7d4f3b9a7081:0 server-side Connection retries=45 2024-12-02T21:08:27,324 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:08:27,324 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T21:08:27,324 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T21:08:27,324 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:08:27,324 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T21:08:27,324 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T21:08:27,324 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 
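The ZKUtil(113) entries above repeatedly report "Set watcher on znode that does not yet exist" for paths such as /hbase/master, /hbase/running and /hbase/acl. As a rough sketch of that pattern, shown with the plain Apache ZooKeeper client rather than HBase's own ZKUtil, an exists() call registers a watch even when the znode is absent, so the caller is notified once the node is later created. The quorum string matches the ensemble in the log, but the session timeout and the println handling are illustrative assumptions.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;

    public class WatchAbsentZnodeSketch {
        public static void main(String[] args) throws Exception {
            // Session timeout (30s) is an assumed value for the example.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:58708", 30_000,
                    (WatchedEvent e) -> System.out.println("session event: " + e));

            // exists() returns null while /hbase/master is not there yet, but the
            // watcher is still registered and fires once the znode is created.
            if (zk.exists("/hbase/master",
                    (WatchedEvent e) -> System.out.println("znode event: " + e)) == null) {
                System.out.println("/hbase/master does not exist yet; watch is set");
            }

            // A real client keeps the session open to receive the notification;
            // this sketch only demonstrates the registration step.
            zk.close();
        }
    }
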
2024-12-02T21:08:27,325 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:46239 2024-12-02T21:08:27,325 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T21:08:27,326 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T21:08:27,326 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:08:27,328 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:08:27,331 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:46239 connecting to ZooKeeper ensemble=127.0.0.1:58708 2024-12-02T21:08:27,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:462390x0, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T21:08:27,338 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:462390x0, quorum=127.0.0.1:58708, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T21:08:27,338 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46239-0x1019929a40e0001 connected 2024-12-02T21:08:27,339 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46239-0x1019929a40e0001, quorum=127.0.0.1:58708, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:08:27,339 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46239-0x1019929a40e0001, quorum=127.0.0.1:58708, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T21:08:27,340 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46239 2024-12-02T21:08:27,340 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46239 2024-12-02T21:08:27,340 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46239 2024-12-02T21:08:27,341 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46239 2024-12-02T21:08:27,341 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46239 2024-12-02T21:08:27,341 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/7d4f3b9a7081,36403,1733173707177 2024-12-02T21:08:27,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:08:27,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x1019929a40e0001, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
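The RpcExecutor entries above describe bounded call queues (queueClass=LinkedBlockingQueue, maxQueueLength=30) drained by small fixed groups of handler threads (handlerCount=3) started per thread prefix. The snippet below is a minimal generic sketch of that queue-plus-handlers shape, not HBase's RpcExecutor; the thread name, the queued Runnable and the closing sleep are illustrative only.

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    public class CallQueueSketch {
        public static void main(String[] args) throws InterruptedException {
            // One bounded call queue (capacity 30), as in maxQueueLength=30.
            BlockingQueue<Runnable> callQueue = new LinkedBlockingQueue<>(30);

            // Three handler threads block on take() until work arrives,
            // mirroring handlerCount=3.
            for (int i = 0; i < 3; i++) {
                Thread handler = new Thread(() -> {
                    try {
                        while (!Thread.currentThread().isInterrupted()) {
                            callQueue.take().run();
                        }
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt();
                    }
                }, "sketch.FPBQ.Fifo.handler=" + i);
                handler.setDaemon(true);
                handler.start();
            }

            // offer() fails fast when the bounded queue is full, which is the
            // back-pressure a maxQueueLength gives a server under load.
            callQueue.offer(() -> System.out.println("handled one queued call"));
            Thread.sleep(200);   // give a handler a moment before the JVM exits
        }
    }
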
2024-12-02T21:08:27,347 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7d4f3b9a7081,36403,1733173707177 2024-12-02T21:08:27,352 DEBUG [M:0;7d4f3b9a7081:36403 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7d4f3b9a7081:36403 2024-12-02T21:08:27,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T21:08:27,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x1019929a40e0001, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T21:08:27,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:08:27,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x1019929a40e0001, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:08:27,355 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T21:08:27,355 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7d4f3b9a7081,36403,1733173707177 from backup master directory 2024-12-02T21:08:27,356 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T21:08:27,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x1019929a40e0001, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:08:27,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7d4f3b9a7081,36403,1733173707177 2024-12-02T21:08:27,363 WARN [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
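The ActiveMasterManager entries here show the startup handoff: the server first registers under /hbase/backup-masters, then deletes that entry and, in the next record, registers as the active master once it owns /hbase/master. Below is a simplified sketch of that claim-and-clean-up step using the plain ZooKeeper client; HBase's actual ActiveMasterManager also sets watchers, handles races and writes a znode payload, and the helper name and string handling here are assumptions for the example.

    import java.nio.charset.StandardCharsets;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class ActiveMasterClaimSketch {
        // Attempt to become active by creating an ephemeral /hbase/master znode;
        // the znode disappears automatically if this process dies.
        static boolean tryBecomeActive(ZooKeeper zk, String serverName) throws Exception {
            try {
                zk.create("/hbase/master",
                        serverName.getBytes(StandardCharsets.UTF_8),
                        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
                // Once active, remove our own backup-masters entry
                // (version -1 means "delete whatever version is there").
                zk.delete("/hbase/backup-masters/" + serverName, -1);
                return true;
            } catch (KeeperException.NodeExistsException someoneElseWon) {
                return false;   // another master already holds /hbase/master
            }
        }
    }
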
2024-12-02T21:08:27,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:08:27,363 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7d4f3b9a7081,36403,1733173707177 2024-12-02T21:08:27,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42239 is added to blk_1073741826_1002 (size=42) 2024-12-02T21:08:27,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44475 is added to blk_1073741826_1002 (size=42) 2024-12-02T21:08:27,377 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/hbase.id with ID: b489ae95-5527-46fb-8697-9011c6abbd3f 2024-12-02T21:08:27,394 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:08:27,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:08:27,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x1019929a40e0001, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:08:27,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44475 is added to blk_1073741827_1003 (size=196) 2024-12-02T21:08:27,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42239 is added to blk_1073741827_1003 (size=196) 2024-12-02T21:08:27,413 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY 
=> 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T21:08:27,414 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-02T21:08:27,415 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:08:27,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44475 is added to blk_1073741828_1004 (size=1189) 2024-12-02T21:08:27,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42239 is added to blk_1073741828_1004 (size=1189) 2024-12-02T21:08:27,423 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/data/master/store 2024-12-02T21:08:27,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42239 is added to blk_1073741829_1005 (size=34) 2024-12-02T21:08:27,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44475 is added to blk_1073741829_1005 (size=34) 2024-12-02T21:08:27,431 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:08:27,431 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T21:08:27,431 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region 
master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:08:27,431 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:08:27,431 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T21:08:27,431 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:08:27,431 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:08:27,431 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-02T21:08:27,432 WARN [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/data/master/store/.initializing 2024-12-02T21:08:27,432 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/WALs/7d4f3b9a7081,36403,1733173707177 2024-12-02T21:08:27,435 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7d4f3b9a7081%2C36403%2C1733173707177, suffix=, logDir=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/WALs/7d4f3b9a7081,36403,1733173707177, archiveDir=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/oldWALs, maxLogs=10 2024-12-02T21:08:27,436 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C36403%2C1733173707177.1733173707435 2024-12-02T21:08:27,443 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/WALs/7d4f3b9a7081,36403,1733173707177/7d4f3b9a7081%2C36403%2C1733173707177.1733173707435 2024-12-02T21:08:27,443 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39689:39689),(127.0.0.1/127.0.0.1:36911:36911)] 2024-12-02T21:08:27,443 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:08:27,443 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:08:27,444 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:08:27,444 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:08:27,445 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:08:27,447 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-02T21:08:27,447 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:08:27,448 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:08:27,448 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:08:27,450 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-02T21:08:27,450 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:08:27,451 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:08:27,451 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:08:27,453 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-02T21:08:27,453 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:08:27,453 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:08:27,453 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:08:27,456 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-02T21:08:27,456 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:08:27,456 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:08:27,457 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:08:27,458 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(5301): 
Found 0 recovered edits file(s) under hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:08:27,460 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T21:08:27,461 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:08:27,464 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:08:27,465 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=781167, jitterRate=-0.006695076823234558}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T21:08:27,466 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-02T21:08:27,466 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-02T21:08:27,470 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1649d9b1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:08:27,471 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-02T21:08:27,472 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-02T21:08:27,472 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-02T21:08:27,472 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-02T21:08:27,473 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec 2024-12-02T21:08:27,473 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-12-02T21:08:27,473 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-02T21:08:27,476 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
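The sequence above, an active master registering itself, bootstrapping the local 'master:store' region and its WAL, then starting the region procedure store and its workers, is what a single-node test cluster produces at startup. As a rough point of reference only, here is a minimal sketch of starting such a cluster with HBaseTestingUtility, assuming the hbase-testing-util artifact is on the classpath; nothing below is taken from this log except the general shape of the deployment.

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        // One master plus one region server, backed by in-process HDFS and ZooKeeper,
        // comparable to the 36403 (master) / 46239 (region server) pair in this log.
        HBaseTestingUtility util = new HBaseTestingUtility();
        util.startMiniCluster(1);
        try (Connection conn = ConnectionFactory.createConnection(util.getConfiguration());
             Admin admin = conn.getAdmin()) {
          System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }

Shutting the utility down tears the HDFS, ZooKeeper and HBase daemons back down, which is why a log like this one ends with a matching shutdown sequence.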
2024-12-02T21:08:27,477 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-02T21:08:27,486 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-02T21:08:27,486 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-02T21:08:27,487 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-02T21:08:27,496 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-02T21:08:27,496 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-02T21:08:27,497 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-02T21:08:27,504 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-02T21:08:27,505 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-02T21:08:27,513 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-02T21:08:27,514 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-02T21:08:27,521 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-02T21:08:27,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x1019929a40e0001, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T21:08:27,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T21:08:27,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:08:27,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x1019929a40e0001, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-02T21:08:27,530 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=7d4f3b9a7081,36403,1733173707177, sessionid=0x1019929a40e0000, setting cluster-up flag (Was=false) 2024-12-02T21:08:27,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x1019929a40e0001, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:08:27,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:08:27,571 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-02T21:08:27,572 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7d4f3b9a7081,36403,1733173707177 2024-12-02T21:08:27,588 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x1019929a40e0001, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:08:27,588 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:08:27,613 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-02T21:08:27,614 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7d4f3b9a7081,36403,1733173707177 2024-12-02T21:08:27,617 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-02T21:08:27,617 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-02T21:08:27,617 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
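The DEBUG lines above in which ZKUtil reports "Unable to get data of znode ... because node does not exist (not necessarily an error)" are routine probes of optional znodes such as /hbase/balancer and /hbase/switch/split: a missing node simply means the default behaviour applies. Below is a minimal sketch of the same probe pattern against the quorum logged here (127.0.0.1:58708), written with the plain ZooKeeper client rather than HBase's internal ZKUtil, and assuming only the standard zookeeper artifact.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZnodeProbeSketch {
      public static void main(String[] args) throws Exception {
        // Quorum string as logged: 127.0.0.1:58708
        ZooKeeper zk = new ZooKeeper("127.0.0.1:58708", 30_000,
            (WatchedEvent event) -> System.out.println("event: " + event));
        try {
          Stat stat = zk.exists("/hbase/balancer", true); // also sets a watch, much as ZKWatcher does
          if (stat == null) {
            // Missing optional znode: treated as "use the default", not as a failure.
            System.out.println("/hbase/balancer does not exist (not necessarily an error)");
          } else {
            byte[] data = zk.getData("/hbase/balancer", false, stat);
            System.out.println("balancer znode has " + data.length + " bytes");
          }
        } finally {
          zk.close();
        }
      }
    }

The NodeCreated and NodeChildrenChanged events in the surrounding lines are exactly what such a watcher receives when the master later creates /hbase/running and other znodes.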
2024-12-02T21:08:27,618 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7d4f3b9a7081,36403,1733173707177 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-02T21:08:27,618 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7d4f3b9a7081:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:08:27,618 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7d4f3b9a7081:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:08:27,618 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:08:27,618 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:08:27,618 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7d4f3b9a7081:0, corePoolSize=10, maxPoolSize=10 2024-12-02T21:08:27,618 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:08:27,618 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=2, maxPoolSize=2 2024-12-02T21:08:27,618 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:08:27,619 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733173737619 2024-12-02T21:08:27,619 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-02T21:08:27,619 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-02T21:08:27,619 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-02T21:08:27,619 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-02T21:08:27,620 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-02T21:08:27,620 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-02T21:08:27,620 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore 
name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T21:08:27,620 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:08:27,620 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-02T21:08:27,620 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-02T21:08:27,620 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-02T21:08:27,620 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-02T21:08:27,621 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-02T21:08:27,621 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-02T21:08:27,621 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.large.0-1733173707621,5,FailOnTimeoutGroup] 2024-12-02T21:08:27,621 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.small.0-1733173707621,5,FailOnTimeoutGroup] 2024-12-02T21:08:27,621 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T21:08:27,621 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-02T21:08:27,621 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-02T21:08:27,621 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
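Each "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" line above is a periodic background task registered with the master's ChoreService (LogsCleaner, HFileCleaner, ReplicationBarrierCleaner, SnapshotCleaner). ScheduledChore and ChoreService are internal, LimitedPrivate classes, so the sketch below only illustrates the pattern behind those log lines and should not be read as a supported public API.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws Exception {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // Periodic background task, analogous to the LogsCleaner/HFileCleaner chores above
        // (period in milliseconds, like the period=600000 entries in the log).
        ScheduledChore chore = new ScheduledChore("demo-cleaner", stopper, 600_000) {
          @Override protected void chore() {
            System.out.println("would scan for and delete expired files here");
          }
        };
        ChoreService service = new ChoreService("demo");
        service.scheduleChore(chore);
        Thread.sleep(1_000);
        service.shutdown();
      }
    }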
2024-12-02T21:08:27,621 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:08:27,621 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T21:08:27,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42239 is added to blk_1073741831_1007 (size=1039) 2024-12-02T21:08:27,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44475 is added to blk_1073741831_1007 (size=1039) 2024-12-02T21:08:27,635 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-02T21:08:27,635 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, 
regionDir=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7 2024-12-02T21:08:27,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42239 is added to blk_1073741832_1008 (size=32) 2024-12-02T21:08:27,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44475 is added to blk_1073741832_1008 (size=32) 2024-12-02T21:08:27,651 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:08:27,652 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T21:08:27,654 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T21:08:27,654 DEBUG [RS:0;7d4f3b9a7081:46239 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7d4f3b9a7081:46239 2024-12-02T21:08:27,655 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:08:27,656 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:08:27,656 INFO [RS:0;7d4f3b9a7081:46239 {}] regionserver.HRegionServer(1008): ClusterId : b489ae95-5527-46fb-8697-9011c6abbd3f 2024-12-02T21:08:27,656 DEBUG [RS:0;7d4f3b9a7081:46239 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T21:08:27,656 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T21:08:27,658 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T21:08:27,658 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:08:27,659 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:08:27,659 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T21:08:27,661 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T21:08:27,661 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:08:27,661 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:08:27,662 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/data/hbase/meta/1588230740 2024-12-02T21:08:27,663 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/data/hbase/meta/1588230740 2024-12-02T21:08:27,664 DEBUG [RS:0;7d4f3b9a7081:46239 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T21:08:27,664 DEBUG [RS:0;7d4f3b9a7081:46239 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T21:08:27,664 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
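The table descriptors printed above for 'master:store' and 'hbase:meta' spell out per-family attributes such as VERSIONS, DATA_BLOCK_ENCODING, BLOOMFILTER, IN_MEMORY and BLOCKSIZE. The same attributes are expressed through the public client builders; the sketch below builds an equivalent 'info' family for an illustrative user table named 'demo' (the system tables themselves are created by HBase, as this log shows, never by client code).

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static void main(String[] args) {
        // Mirrors the 'info' family attributes printed in the log:
        // VERSIONS => '3', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOOMFILTER => 'ROWCOL',
        // IN_MEMORY => 'true', BLOCKSIZE => 8192.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8192)
            .build();
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo")) // illustrative user table, not a system table
            .setColumnFamily(info)
            .build();
        System.out.println(td);
      }
    }

Printing the descriptor produces the same attribute listing format seen in the log lines above.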
2024-12-02T21:08:27,665 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-02T21:08:27,668 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:08:27,668 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=858889, jitterRate=0.09213411808013916}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T21:08:27,669 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-02T21:08:27,670 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-02T21:08:27,670 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-02T21:08:27,670 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-02T21:08:27,670 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T21:08:27,670 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T21:08:27,670 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-02T21:08:27,670 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-02T21:08:27,672 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:08:27,672 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-02T21:08:27,672 DEBUG [RS:0;7d4f3b9a7081:46239 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T21:08:27,672 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-02T21:08:27,672 DEBUG [RS:0;7d4f3b9a7081:46239 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59c8d265, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:08:27,672 DEBUG [RS:0;7d4f3b9a7081:46239 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24c61c2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7d4f3b9a7081/172.17.0.2:0 2024-12-02T21:08:27,672 INFO [RS:0;7d4f3b9a7081:46239 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-02T21:08:27,673 INFO [RS:0;7d4f3b9a7081:46239 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-02T21:08:27,673 DEBUG [RS:0;7d4f3b9a7081:46239 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-02T21:08:27,673 INFO [RS:0;7d4f3b9a7081:46239 {}] regionserver.HRegionServer(3073): reportForDuty to master=7d4f3b9a7081,36403,1733173707177 with isa=7d4f3b9a7081/172.17.0.2:46239, startcode=1733173707323 2024-12-02T21:08:27,673 DEBUG [RS:0;7d4f3b9a7081:46239 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T21:08:27,673 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T21:08:27,674 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-02T21:08:27,676 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39341, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T21:08:27,676 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36403 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 7d4f3b9a7081,46239,1733173707323 2024-12-02T21:08:27,676 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36403 {}] master.ServerManager(486): Registering regionserver=7d4f3b9a7081,46239,1733173707323 2024-12-02T21:08:27,678 DEBUG [RS:0;7d4f3b9a7081:46239 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7 2024-12-02T21:08:27,678 DEBUG [RS:0;7d4f3b9a7081:46239 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:40413 2024-12-02T21:08:27,678 DEBUG [RS:0;7d4f3b9a7081:46239 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-02T21:08:27,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T21:08:27,688 DEBUG [RS:0;7d4f3b9a7081:46239 {}] zookeeper.ZKUtil(111): regionserver:46239-0x1019929a40e0001, quorum=127.0.0.1:58708, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7d4f3b9a7081,46239,1733173707323 2024-12-02T21:08:27,688 WARN [RS:0;7d4f3b9a7081:46239 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
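During reportForDuty above, the master hands the region server its hbase.rootdir, fs.defaultFS and hbase.master.info.port. A client never takes part in that exchange; it only needs the ZooKeeper quorum. The sketch below is a hypothetical client configuration pointing at this same test deployment, with the rootdir and defaultFS keys included purely to tie the configuration names to the log lines.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClientConfSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // ZooKeeper quorum as it appears throughout this log: 127.0.0.1:58708.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 58708);
        // Settings the master handed to the region server above; a client does not
        // normally set these, they are shown only to map the keys to the log lines.
        conf.set("hbase.rootdir",
            "hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7");
        conf.set("fs.defaultFS", "hdfs://localhost:40413");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          System.out.println("connected, cluster id = " + admin.getClusterMetrics().getClusterId());
        }
      }
    }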
2024-12-02T21:08:27,688 INFO [RS:0;7d4f3b9a7081:46239 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:08:27,688 DEBUG [RS:0;7d4f3b9a7081:46239 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323 2024-12-02T21:08:27,689 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7d4f3b9a7081,46239,1733173707323] 2024-12-02T21:08:27,692 DEBUG [RS:0;7d4f3b9a7081:46239 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-02T21:08:27,692 INFO [RS:0;7d4f3b9a7081:46239 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T21:08:27,695 INFO [RS:0;7d4f3b9a7081:46239 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T21:08:27,699 INFO [RS:0;7d4f3b9a7081:46239 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T21:08:27,699 INFO [RS:0;7d4f3b9a7081:46239 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:08:27,699 INFO [RS:0;7d4f3b9a7081:46239 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-02T21:08:27,701 INFO [RS:0;7d4f3b9a7081:46239 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
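The MemStoreFlusher line above reports globalMemStoreLimit=880 M and globalMemStoreLimitLowMark=836 M. Those figures are consistent with the stock defaults, where the low-water mark is hbase.regionserver.global.memstore.size.lower.limit (0.95) times the global limit: 880 x 0.95 = 836. If the default hbase.regionserver.global.memstore.size fraction of 0.4 is also in effect (an assumption, not something this log states), the limit implies a heap of roughly 2.2 GB. A throwaway check of that arithmetic:

    public class MemstoreLimitCheck {
      public static void main(String[] args) {
        // Figures from the MemStoreFlusher line above.
        double globalLimitMb = 880;
        double lowerLimitFraction = 0.95; // default hbase.regionserver.global.memstore.size.lower.limit
        double memstoreFraction = 0.40;   // default hbase.regionserver.global.memstore.size (assumed)
        System.out.printf("low-water mark: %.0f MB (log says 836 M)%n",
            globalLimitMb * lowerLimitFraction);   // 880 * 0.95 = 836
        System.out.printf("implied heap:   %.0f MB%n",
            globalLimitMb / memstoreFraction);     // ~2200 MB, only if the default fraction applies
      }
    }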
2024-12-02T21:08:27,701 DEBUG [RS:0;7d4f3b9a7081:46239 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:08:27,701 DEBUG [RS:0;7d4f3b9a7081:46239 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:08:27,701 DEBUG [RS:0;7d4f3b9a7081:46239 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:08:27,701 DEBUG [RS:0;7d4f3b9a7081:46239 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:08:27,701 DEBUG [RS:0;7d4f3b9a7081:46239 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:08:27,701 DEBUG [RS:0;7d4f3b9a7081:46239 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7d4f3b9a7081:0, corePoolSize=2, maxPoolSize=2 2024-12-02T21:08:27,701 DEBUG [RS:0;7d4f3b9a7081:46239 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:08:27,701 DEBUG [RS:0;7d4f3b9a7081:46239 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:08:27,701 DEBUG [RS:0;7d4f3b9a7081:46239 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:08:27,702 DEBUG [RS:0;7d4f3b9a7081:46239 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:08:27,702 DEBUG [RS:0;7d4f3b9a7081:46239 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:08:27,702 DEBUG [RS:0;7d4f3b9a7081:46239 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7d4f3b9a7081:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:08:27,702 DEBUG [RS:0;7d4f3b9a7081:46239 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:08:27,702 INFO [RS:0;7d4f3b9a7081:46239 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T21:08:27,702 INFO [RS:0;7d4f3b9a7081:46239 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T21:08:27,702 INFO [RS:0;7d4f3b9a7081:46239 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T21:08:27,703 INFO [RS:0;7d4f3b9a7081:46239 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T21:08:27,703 INFO [RS:0;7d4f3b9a7081:46239 {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,46239,1733173707323-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-02T21:08:27,720 INFO [RS:0;7d4f3b9a7081:46239 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T21:08:27,721 INFO [RS:0;7d4f3b9a7081:46239 {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,46239,1733173707323-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:08:27,735 INFO [RS:0;7d4f3b9a7081:46239 {}] regionserver.Replication(204): 7d4f3b9a7081,46239,1733173707323 started 2024-12-02T21:08:27,735 INFO [RS:0;7d4f3b9a7081:46239 {}] regionserver.HRegionServer(1767): Serving as 7d4f3b9a7081,46239,1733173707323, RpcServer on 7d4f3b9a7081/172.17.0.2:46239, sessionid=0x1019929a40e0001 2024-12-02T21:08:27,735 DEBUG [RS:0;7d4f3b9a7081:46239 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T21:08:27,735 DEBUG [RS:0;7d4f3b9a7081:46239 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7d4f3b9a7081,46239,1733173707323 2024-12-02T21:08:27,735 DEBUG [RS:0;7d4f3b9a7081:46239 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7d4f3b9a7081,46239,1733173707323' 2024-12-02T21:08:27,735 DEBUG [RS:0;7d4f3b9a7081:46239 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T21:08:27,736 DEBUG [RS:0;7d4f3b9a7081:46239 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T21:08:27,736 DEBUG [RS:0;7d4f3b9a7081:46239 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T21:08:27,736 DEBUG [RS:0;7d4f3b9a7081:46239 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T21:08:27,737 DEBUG [RS:0;7d4f3b9a7081:46239 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7d4f3b9a7081,46239,1733173707323 2024-12-02T21:08:27,737 DEBUG [RS:0;7d4f3b9a7081:46239 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7d4f3b9a7081,46239,1733173707323' 2024-12-02T21:08:27,737 DEBUG [RS:0;7d4f3b9a7081:46239 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T21:08:27,737 DEBUG [RS:0;7d4f3b9a7081:46239 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T21:08:27,737 DEBUG [RS:0;7d4f3b9a7081:46239 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T21:08:27,737 INFO [RS:0;7d4f3b9a7081:46239 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T21:08:27,737 INFO [RS:0;7d4f3b9a7081:46239 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-02T21:08:27,825 WARN [7d4f3b9a7081:36403 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
2024-12-02T21:08:27,840 INFO [RS:0;7d4f3b9a7081:46239 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7d4f3b9a7081%2C46239%2C1733173707323, suffix=, logDir=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323, archiveDir=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/oldWALs, maxLogs=32 2024-12-02T21:08:27,842 INFO [RS:0;7d4f3b9a7081:46239 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C46239%2C1733173707323.1733173707842 2024-12-02T21:08:27,852 INFO [RS:0;7d4f3b9a7081:46239 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173707842 2024-12-02T21:08:27,852 DEBUG [RS:0;7d4f3b9a7081:46239 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39689:39689),(127.0.0.1/127.0.0.1:36911:36911)] 2024-12-02T21:08:28,075 DEBUG [7d4f3b9a7081:36403 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-02T21:08:28,076 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7d4f3b9a7081,46239,1733173707323 2024-12-02T21:08:28,078 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7d4f3b9a7081,46239,1733173707323, state=OPENING 2024-12-02T21:08:28,121 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-02T21:08:28,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:08:28,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x1019929a40e0001, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:08:28,131 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=7d4f3b9a7081,46239,1733173707323}] 2024-12-02T21:08:28,131 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:08:28,131 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:08:28,286 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7d4f3b9a7081,46239,1733173707323 2024-12-02T21:08:28,287 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T21:08:28,293 INFO [RS-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49636, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T21:08:28,299 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-02T21:08:28,300 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating 
WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:08:28,303 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7d4f3b9a7081%2C46239%2C1733173707323.meta, suffix=.meta, logDir=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323, archiveDir=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/oldWALs, maxLogs=32 2024-12-02T21:08:28,307 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta 2024-12-02T21:08:28,315 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta 2024-12-02T21:08:28,316 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36911:36911),(127.0.0.1/127.0.0.1:39689:39689)] 2024-12-02T21:08:28,316 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:08:28,316 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-02T21:08:28,316 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-02T21:08:28,316 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
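The WAL lines above show each server writing its logs under .../WALs/<server>,<port>,<startcode> on the test HDFS at hdfs://localhost:40413, with the meta WAL carrying a .meta suffix. Here is a small sketch that lists those files with the ordinary Hadoop FileSystem API, reusing the NameNode address and directory exactly as they appear in this log; it is illustrative only and assumes the test cluster is still running.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListWalsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode address and WAL directory taken from the log above.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:40413"), conf);
        Path walDir = new Path("/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs");
        for (FileStatus serverDir : fs.listStatus(walDir)) {          // one subdirectory per region server
          for (FileStatus wal : fs.listStatus(serverDir.getPath())) { // e.g. ...1733173707842, ...meta.1733173708307.meta
            System.out.println(wal.getPath() + " (" + wal.getLen() + " bytes)");
          }
        }
        fs.close();
      }
    }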
2024-12-02T21:08:28,316 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-02T21:08:28,317 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:08:28,317 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-02T21:08:28,317 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-02T21:08:28,318 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T21:08:28,319 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T21:08:28,319 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:08:28,320 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:08:28,320 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T21:08:28,321 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T21:08:28,321 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:08:28,321 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:08:28,321 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T21:08:28,322 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T21:08:28,322 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:08:28,322 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:08:28,323 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/data/hbase/meta/1588230740 2024-12-02T21:08:28,325 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/data/hbase/meta/1588230740 2024-12-02T21:08:28,326 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
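Editor's note: the CompactionConfiguration entries above print the store-level compaction defaults (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0), and the StoreFileTrackerFactory lines show the DEFAULT tracker being chosen. A hedged sketch of the configuration keys behind those numbers, assuming HBase 2.x; the values restate the logged defaults.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
    public static Configuration compactionDefaults() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact in the log
        conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact in the log
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // compaction selection ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        // Store file tracking; "DEFAULT" matches the DefaultStoreFileTracker instantiated above.
        conf.set("hbase.store.file-tracker.impl", "DEFAULT");
        return conf;
    }
}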
2024-12-02T21:08:28,328 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-02T21:08:28,329 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=837792, jitterRate=0.06530767679214478}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T21:08:28,329 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-02T21:08:28,330 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733173708286 2024-12-02T21:08:28,333 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-02T21:08:28,333 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-02T21:08:28,333 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7d4f3b9a7081,46239,1733173707323 2024-12-02T21:08:28,334 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7d4f3b9a7081,46239,1733173707323, state=OPEN 2024-12-02T21:08:28,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T21:08:28,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x1019929a40e0001, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T21:08:28,367 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:08:28,367 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:08:28,370 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-02T21:08:28,370 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=7d4f3b9a7081,46239,1733173707323 in 237 msec 2024-12-02T21:08:28,373 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-02T21:08:28,374 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 698 msec 2024-12-02T21:08:28,377 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 758 msec 2024-12-02T21:08:28,377 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers 
to report in: status=status unset, state=RUNNING, startTime=1733173708377, completionTime=-1 2024-12-02T21:08:28,377 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-02T21:08:28,377 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-02T21:08:28,378 DEBUG [hconnection-0x786ad4e4-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T21:08:28,379 INFO [RS-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49642, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T21:08:28,380 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-02T21:08:28,380 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733173768380 2024-12-02T21:08:28,380 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733173828380 2024-12-02T21:08:28,380 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 3 msec 2024-12-02T21:08:28,405 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,36403,1733173707177-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:08:28,405 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,36403,1733173707177-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:08:28,405 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,36403,1733173707177-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:08:28,405 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7d4f3b9a7081:36403, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:08:28,405 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-02T21:08:28,405 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
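Editor's note: the ServerManager entry above blocks the master until a minimum number of region servers report in, and the ChoreService entries enable the periodic master chores. A small sketch of the keys that drive that behaviour, assuming HBase 2.x; the property names are given from memory and are worth verifying against your version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MasterStartupConfigSketch {
    public static Configuration masterStartup() {
        Configuration conf = HBaseConfiguration.create();
        // Master waits for at least this many region servers before finishing startup
        // (the log above waited for min=1 and was satisfied immediately).
        conf.setInt("hbase.master.wait.on.regionservers.mintostart", 1);
        // BalancerChore period in milliseconds (300000 in the log).
        conf.setInt("hbase.balancer.period", 300000);
        return conf;
    }
}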
2024-12-02T21:08:28,405 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T21:08:28,407 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-02T21:08:28,407 DEBUG [master/7d4f3b9a7081:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-02T21:08:28,409 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T21:08:28,409 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:08:28,410 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T21:08:28,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42239 is added to blk_1073741835_1011 (size=358) 2024-12-02T21:08:28,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44475 is added to blk_1073741835_1011 (size=358) 2024-12-02T21:08:28,422 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => c3ed35c3efec05998439d93744730396, NAME => 'hbase:namespace,,1733173708405.c3ed35c3efec05998439d93744730396.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7 2024-12-02T21:08:28,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44475 is added to blk_1073741836_1012 (size=42) 2024-12-02T21:08:28,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42239 is added to blk_1073741836_1012 (size=42) 2024-12-02T21:08:28,434 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733173708405.c3ed35c3efec05998439d93744730396.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:08:28,434 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing c3ed35c3efec05998439d93744730396, disabling compactions & flushes 2024-12-02T21:08:28,434 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region 
hbase:namespace,,1733173708405.c3ed35c3efec05998439d93744730396. 2024-12-02T21:08:28,434 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733173708405.c3ed35c3efec05998439d93744730396. 2024-12-02T21:08:28,434 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733173708405.c3ed35c3efec05998439d93744730396. after waiting 0 ms 2024-12-02T21:08:28,434 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733173708405.c3ed35c3efec05998439d93744730396. 2024-12-02T21:08:28,435 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733173708405.c3ed35c3efec05998439d93744730396. 2024-12-02T21:08:28,435 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for c3ed35c3efec05998439d93744730396: 2024-12-02T21:08:28,436 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T21:08:28,437 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733173708405.c3ed35c3efec05998439d93744730396.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733173708437"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733173708437"}]},"ts":"1733173708437"} 2024-12-02T21:08:28,440 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-02T21:08:28,441 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T21:08:28,442 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733173708441"}]},"ts":"1733173708441"} 2024-12-02T21:08:28,444 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-02T21:08:28,463 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=c3ed35c3efec05998439d93744730396, ASSIGN}] 2024-12-02T21:08:28,465 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=c3ed35c3efec05998439d93744730396, ASSIGN 2024-12-02T21:08:28,467 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=c3ed35c3efec05998439d93744730396, ASSIGN; state=OFFLINE, location=7d4f3b9a7081,46239,1733173707323; forceNewPlan=false, retain=false 2024-12-02T21:08:28,618 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=c3ed35c3efec05998439d93744730396, regionState=OPENING, regionLocation=7d4f3b9a7081,46239,1733173707323 2024-12-02T21:08:28,622 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): 
Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure c3ed35c3efec05998439d93744730396, server=7d4f3b9a7081,46239,1733173707323}] 2024-12-02T21:08:28,779 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7d4f3b9a7081,46239,1733173707323 2024-12-02T21:08:28,789 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733173708405.c3ed35c3efec05998439d93744730396. 2024-12-02T21:08:28,789 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => c3ed35c3efec05998439d93744730396, NAME => 'hbase:namespace,,1733173708405.c3ed35c3efec05998439d93744730396.', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:08:28,790 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace c3ed35c3efec05998439d93744730396 2024-12-02T21:08:28,791 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733173708405.c3ed35c3efec05998439d93744730396.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:08:28,791 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for c3ed35c3efec05998439d93744730396 2024-12-02T21:08:28,791 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for c3ed35c3efec05998439d93744730396 2024-12-02T21:08:28,794 INFO [StoreOpener-c3ed35c3efec05998439d93744730396-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c3ed35c3efec05998439d93744730396 2024-12-02T21:08:28,796 INFO [StoreOpener-c3ed35c3efec05998439d93744730396-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c3ed35c3efec05998439d93744730396 columnFamilyName info 2024-12-02T21:08:28,796 DEBUG [StoreOpener-c3ed35c3efec05998439d93744730396-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:08:28,797 INFO [StoreOpener-c3ed35c3efec05998439d93744730396-1 {}] regionserver.HStore(327): Store=c3ed35c3efec05998439d93744730396/info, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:08:28,798 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/data/hbase/namespace/c3ed35c3efec05998439d93744730396 2024-12-02T21:08:28,799 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/data/hbase/namespace/c3ed35c3efec05998439d93744730396 2024-12-02T21:08:28,801 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for c3ed35c3efec05998439d93744730396 2024-12-02T21:08:28,804 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/data/hbase/namespace/c3ed35c3efec05998439d93744730396/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:08:28,804 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened c3ed35c3efec05998439d93744730396; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=780240, jitterRate=-0.007873564958572388}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T21:08:28,805 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for c3ed35c3efec05998439d93744730396: 2024-12-02T21:08:28,806 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733173708405.c3ed35c3efec05998439d93744730396., pid=6, masterSystemTime=1733173708779 2024-12-02T21:08:28,808 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733173708405.c3ed35c3efec05998439d93744730396. 2024-12-02T21:08:28,809 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733173708405.c3ed35c3efec05998439d93744730396. 
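Editor's note: the hbase:namespace descriptor logged earlier (NAME => 'info', VERSIONS => '10', IN_MEMORY => 'true', BLOOMFILTER => 'ROW', BLOCKSIZE => '8192') is the kind of descriptor the 2.x builder API produces. A hedged sketch of an equivalent descriptor for a hypothetical user table; the table name "example:table" is a placeholder, not something from the log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class NamespaceLikeDescriptorSketch {
    public static TableDescriptor build() {
        // Column family settings mirroring the hbase:namespace 'info' family above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(10)
                .setInMemory(true)
                .setBlocksize(8192)
                .setBloomFilterType(BloomType.ROW)
                .build();
        // Placeholder table name; the log entries above are creating the system table itself.
        return TableDescriptorBuilder
                .newBuilder(TableName.valueOf("example", "table"))
                .setColumnFamily(info)
                .build();
    }
}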
2024-12-02T21:08:28,809 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=c3ed35c3efec05998439d93744730396, regionState=OPEN, openSeqNum=2, regionLocation=7d4f3b9a7081,46239,1733173707323 2024-12-02T21:08:28,815 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-02T21:08:28,815 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure c3ed35c3efec05998439d93744730396, server=7d4f3b9a7081,46239,1733173707323 in 190 msec 2024-12-02T21:08:28,819 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-02T21:08:28,819 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=c3ed35c3efec05998439d93744730396, ASSIGN in 352 msec 2024-12-02T21:08:28,820 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T21:08:28,821 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733173708820"}]},"ts":"1733173708820"} 2024-12-02T21:08:28,824 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-02T21:08:28,831 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T21:08:28,833 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 426 msec 2024-12-02T21:08:28,909 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-02T21:08:28,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-02T21:08:28,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x1019929a40e0001, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:08:28,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:08:28,957 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-02T21:08:28,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-02T21:08:28,982 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 26 msec 2024-12-02T21:08:28,989 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-02T21:08:29,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-02T21:08:29,016 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 26 msec 2024-12-02T21:08:29,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-02T21:08:29,054 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-02T21:08:29,054 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.691sec 2024-12-02T21:08:29,055 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-02T21:08:29,055 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-02T21:08:29,055 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-02T21:08:29,055 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-02T21:08:29,055 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-02T21:08:29,055 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,36403,1733173707177-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T21:08:29,055 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,36403,1733173707177-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-02T21:08:29,057 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-02T21:08:29,057 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-02T21:08:29,057 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,36403,1733173707177-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
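Editor's note: once the master reports "has completed initialization" and the default and hbase namespaces exist, a plain client connection can confirm that state. A minimal sketch assuming the same ZooKeeper quorum as the log (127.0.0.1:58708); that address is only meaningful inside this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListNamespacesSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "58708");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // After master initialization the listing should contain "default" and "hbase",
            // the two namespaces created by the procedures logged above.
            for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
                System.out.println(ns.getName());
            }
        }
    }
}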
2024-12-02T21:08:29,146 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x658b1d03 to 127.0.0.1:58708 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@210524d3 2024-12-02T21:08:29,156 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a8d8d51, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:08:29,159 DEBUG [hconnection-0x3d4e4d20-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T21:08:29,162 INFO [RS-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49650, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T21:08:29,165 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=7d4f3b9a7081,36403,1733173707177 2024-12-02T21:08:29,166 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:08:29,169 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-02T21:08:29,185 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/7d4f3b9a7081:0 server-side Connection retries=45 2024-12-02T21:08:29,185 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:08:29,185 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T21:08:29,185 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T21:08:29,185 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:08:29,185 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T21:08:29,185 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T21:08:29,185 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T21:08:29,186 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:42679 2024-12-02T21:08:29,186 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T21:08:29,187 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T21:08:29,187 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:08:29,189 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:08:29,192 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:42679 connecting to ZooKeeper ensemble=127.0.0.1:58708 2024-12-02T21:08:29,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:426790x0, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T21:08:29,203 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:426790x0, quorum=127.0.0.1:58708, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T21:08:29,203 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42679-0x1019929a40e0003 connected 2024-12-02T21:08:29,204 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:42679-0x1019929a40e0003, quorum=127.0.0.1:58708, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-12-02T21:08:29,205 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42679-0x1019929a40e0003, quorum=127.0.0.1:58708, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T21:08:29,206 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42679 2024-12-02T21:08:29,206 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42679 2024-12-02T21:08:29,207 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42679 2024-12-02T21:08:29,207 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42679 2024-12-02T21:08:29,207 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42679 2024-12-02T21:08:29,208 DEBUG [pool-282-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-12-02T21:08:29,221 DEBUG [RS:1;7d4f3b9a7081:42679 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;7d4f3b9a7081:42679 2024-12-02T21:08:29,223 INFO [RS:1;7d4f3b9a7081:42679 {}] regionserver.HRegionServer(1008): ClusterId : b489ae95-5527-46fb-8697-9011c6abbd3f 2024-12-02T21:08:29,223 DEBUG [RS:1;7d4f3b9a7081:42679 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T21:08:29,230 DEBUG [RS:1;7d4f3b9a7081:42679 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T21:08:29,230 DEBUG [RS:1;7d4f3b9a7081:42679 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T21:08:29,239 DEBUG [RS:1;7d4f3b9a7081:42679 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T21:08:29,239 DEBUG [RS:1;7d4f3b9a7081:42679 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a0f645a, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:08:29,240 DEBUG [RS:1;7d4f3b9a7081:42679 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@717d55a9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7d4f3b9a7081/172.17.0.2:0 2024-12-02T21:08:29,240 INFO [RS:1;7d4f3b9a7081:42679 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-02T21:08:29,240 INFO [RS:1;7d4f3b9a7081:42679 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-02T21:08:29,240 DEBUG [RS:1;7d4f3b9a7081:42679 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-02T21:08:29,241 INFO [RS:1;7d4f3b9a7081:42679 {}] regionserver.HRegionServer(3073): reportForDuty to master=7d4f3b9a7081,36403,1733173707177 with isa=7d4f3b9a7081/172.17.0.2:42679, startcode=1733173709184 2024-12-02T21:08:29,241 DEBUG [RS:1;7d4f3b9a7081:42679 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T21:08:29,244 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34871, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T21:08:29,244 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36403 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 7d4f3b9a7081,42679,1733173709184 2024-12-02T21:08:29,244 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36403 {}] master.ServerManager(486): Registering regionserver=7d4f3b9a7081,42679,1733173709184 2024-12-02T21:08:29,246 DEBUG [RS:1;7d4f3b9a7081:42679 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7 2024-12-02T21:08:29,246 DEBUG [RS:1;7d4f3b9a7081:42679 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:40413 2024-12-02T21:08:29,246 DEBUG [RS:1;7d4f3b9a7081:42679 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-02T21:08:29,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T21:08:29,255 DEBUG [RS:1;7d4f3b9a7081:42679 {}] zookeeper.ZKUtil(111): regionserver:42679-0x1019929a40e0003, quorum=127.0.0.1:58708, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7d4f3b9a7081,42679,1733173709184 2024-12-02T21:08:29,255 WARN [RS:1;7d4f3b9a7081:42679 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
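Editor's note: the RpcExecutor entries earlier (default.FPBQ.Fifo handlerCount=3; priority.RWQ.Fifo with 1 write and 2 read handlers) come from the server's handler-count settings, which this test deliberately keeps tiny. A hedged sketch of the most common knobs, assuming HBase 2.x; treat the exact key names and their interaction as a best-effort recollection and verify before relying on them.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcHandlerConfigSketch {
    public static Configuration rpcHandlers() {
        Configuration conf = HBaseConfiguration.create();
        // Handlers behind the default call queue (3 in this test run, 30 by default).
        conf.setInt("hbase.regionserver.handler.count", 3);
        // Handlers reserved for priority (meta/system) requests.
        conf.setInt("hbase.regionserver.metahandler.count", 3);
        // Read/write split for RWQueue executors; the priority.RWQ line above
        // shows 1 write handler and 2 read handlers.
        conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.66f);
        return conf;
    }
}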
2024-12-02T21:08:29,255 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7d4f3b9a7081,42679,1733173709184] 2024-12-02T21:08:29,256 INFO [RS:1;7d4f3b9a7081:42679 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:08:29,256 DEBUG [RS:1;7d4f3b9a7081:42679 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,42679,1733173709184 2024-12-02T21:08:29,262 DEBUG [RS:1;7d4f3b9a7081:42679 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-02T21:08:29,262 INFO [RS:1;7d4f3b9a7081:42679 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T21:08:29,266 INFO [RS:1;7d4f3b9a7081:42679 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T21:08:29,266 INFO [RS:1;7d4f3b9a7081:42679 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T21:08:29,266 INFO [RS:1;7d4f3b9a7081:42679 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:08:29,266 INFO [RS:1;7d4f3b9a7081:42679 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-02T21:08:29,267 INFO [RS:1;7d4f3b9a7081:42679 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
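Editor's note: the MemStoreFlusher entry above derives its 880 M global limit (and the 836 M low-water mark, roughly 95% of it) from heap-fraction settings. A small sketch of those keys, assuming HBase 2.x; the fractions below are the stock defaults, not values read from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
    public static Configuration memstoreLimits() {
        Configuration conf = HBaseConfiguration.create();
        // Global memstore upper limit as a fraction of heap (0.4 by default);
        // applied to this JVM's heap it produces the 880 M figure above.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Low-water mark as a fraction of the upper limit (0.95 by default),
        // which matches the 836 M lowMark in the log.
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        return conf;
    }
}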
2024-12-02T21:08:29,267 DEBUG [RS:1;7d4f3b9a7081:42679 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:08:29,267 DEBUG [RS:1;7d4f3b9a7081:42679 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:08:29,268 DEBUG [RS:1;7d4f3b9a7081:42679 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:08:29,268 DEBUG [RS:1;7d4f3b9a7081:42679 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:08:29,268 DEBUG [RS:1;7d4f3b9a7081:42679 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:08:29,268 DEBUG [RS:1;7d4f3b9a7081:42679 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7d4f3b9a7081:0, corePoolSize=2, maxPoolSize=2 2024-12-02T21:08:29,268 DEBUG [RS:1;7d4f3b9a7081:42679 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:08:29,268 DEBUG [RS:1;7d4f3b9a7081:42679 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:08:29,268 DEBUG [RS:1;7d4f3b9a7081:42679 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:08:29,268 DEBUG [RS:1;7d4f3b9a7081:42679 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:08:29,268 DEBUG [RS:1;7d4f3b9a7081:42679 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:08:29,268 DEBUG [RS:1;7d4f3b9a7081:42679 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7d4f3b9a7081:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:08:29,268 DEBUG [RS:1;7d4f3b9a7081:42679 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:08:29,269 INFO [RS:1;7d4f3b9a7081:42679 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T21:08:29,269 INFO [RS:1;7d4f3b9a7081:42679 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T21:08:29,269 INFO [RS:1;7d4f3b9a7081:42679 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T21:08:29,269 INFO [RS:1;7d4f3b9a7081:42679 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T21:08:29,269 INFO [RS:1;7d4f3b9a7081:42679 {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,42679,1733173709184-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
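Editor's note: each ExecutorService line above is a small fixed-size thread pool dedicated to one event type (open region, close region, log replay, and so on). As a rough JDK-level analogy only, not the HBase class itself, a corePoolSize=1 / maxPoolSize=1 pool looks like this.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class EventPoolAnalogy {
    public static ThreadPoolExecutor singleThreadPool(String name) {
        // core=1, max=1 mirrors the corePoolSize=1, maxPoolSize=1 pools in the log;
        // an unbounded queue holds pending events until the single worker is free.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
                r -> new Thread(r, name));
        pool.allowCoreThreadTimeOut(true);
        return pool;
    }

    public static void main(String[] args) {
        ThreadPoolExecutor pool = singleThreadPool("RS_OPEN_REGION-analogy");
        pool.execute(() -> System.out.println("handling one region-open event"));
        pool.shutdown();
    }
}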
2024-12-02T21:08:29,282 INFO [RS:1;7d4f3b9a7081:42679 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T21:08:29,282 INFO [RS:1;7d4f3b9a7081:42679 {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,42679,1733173709184-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:08:29,293 INFO [RS:1;7d4f3b9a7081:42679 {}] regionserver.Replication(204): 7d4f3b9a7081,42679,1733173709184 started 2024-12-02T21:08:29,294 INFO [RS:1;7d4f3b9a7081:42679 {}] regionserver.HRegionServer(1767): Serving as 7d4f3b9a7081,42679,1733173709184, RpcServer on 7d4f3b9a7081/172.17.0.2:42679, sessionid=0x1019929a40e0003 2024-12-02T21:08:29,294 DEBUG [RS:1;7d4f3b9a7081:42679 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T21:08:29,294 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3355): Started new server=Thread[RS:1;7d4f3b9a7081:42679,5,FailOnTimeoutGroup] 2024-12-02T21:08:29,294 DEBUG [RS:1;7d4f3b9a7081:42679 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7d4f3b9a7081,42679,1733173709184 2024-12-02T21:08:29,294 DEBUG [RS:1;7d4f3b9a7081:42679 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7d4f3b9a7081,42679,1733173709184' 2024-12-02T21:08:29,294 DEBUG [RS:1;7d4f3b9a7081:42679 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T21:08:29,294 INFO [Time-limited test {}] wal.TestLogRolling(191): Replication=2 2024-12-02T21:08:29,294 DEBUG [RS:1;7d4f3b9a7081:42679 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T21:08:29,295 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T21:08:29,295 DEBUG [RS:1;7d4f3b9a7081:42679 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T21:08:29,295 DEBUG [RS:1;7d4f3b9a7081:42679 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T21:08:29,295 DEBUG [RS:1;7d4f3b9a7081:42679 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7d4f3b9a7081,42679,1733173709184 2024-12-02T21:08:29,295 DEBUG [RS:1;7d4f3b9a7081:42679 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7d4f3b9a7081,42679,1733173709184' 2024-12-02T21:08:29,295 DEBUG [RS:1;7d4f3b9a7081:42679 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T21:08:29,296 DEBUG [RS:1;7d4f3b9a7081:42679 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T21:08:29,296 DEBUG [RS:1;7d4f3b9a7081:42679 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T21:08:29,296 INFO [RS:1;7d4f3b9a7081:42679 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T21:08:29,296 INFO [RS:1;7d4f3b9a7081:42679 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
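Editor's note: the flush-table-proc and online-snapshot procedure members starting above are the region-server side of the distributed flush and snapshot operations a client can trigger through Admin. A minimal, hedged sketch of those client calls; "example:table" and the snapshot name are placeholders, not taken from the log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndSnapshotSketch {
    public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("example", "table"); // placeholder name
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Asks the cluster to flush the table; the region-server side of this
            // request is the flush-table-proc member started in the log above.
            admin.flush(table);
            // Takes an online snapshot; served by the online-snapshot members above.
            admin.snapshot("example-snapshot", table);
        }
    }
}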
2024-12-02T21:08:29,297 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42502, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T21:08:29,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36403 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-02T21:08:29,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36403 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-02T21:08:29,298 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36403 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T21:08:29,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36403 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-12-02T21:08:29,301 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T21:08:29,301 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:08:29,301 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36403 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 9 2024-12-02T21:08:29,302 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T21:08:29,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36403 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-02T21:08:29,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44475 is added to blk_1073741837_1013 (size=393) 2024-12-02T21:08:29,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42239 is added to blk_1073741837_1013 (size=393) 2024-12-02T21:08:29,312 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 9eb28e6fd3caef0514628f72efd96120, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733173709298.9eb28e6fd3caef0514628f72efd96120.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7 2024-12-02T21:08:29,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42239 is added to blk_1073741838_1014 (size=76) 2024-12-02T21:08:29,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44475 is added to blk_1073741838_1014 (size=76) 2024-12-02T21:08:29,319 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733173709298.9eb28e6fd3caef0514628f72efd96120.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:08:29,319 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1681): Closing 9eb28e6fd3caef0514628f72efd96120, disabling compactions & flushes 2024-12-02T21:08:29,319 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733173709298.9eb28e6fd3caef0514628f72efd96120. 2024-12-02T21:08:29,319 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733173709298.9eb28e6fd3caef0514628f72efd96120. 2024-12-02T21:08:29,319 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733173709298.9eb28e6fd3caef0514628f72efd96120. after waiting 0 ms 2024-12-02T21:08:29,319 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733173709298.9eb28e6fd3caef0514628f72efd96120. 2024-12-02T21:08:29,319 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733173709298.9eb28e6fd3caef0514628f72efd96120. 2024-12-02T21:08:29,319 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1635): Region close journal for 9eb28e6fd3caef0514628f72efd96120: 2024-12-02T21:08:29,321 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T21:08:29,321 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1733173709298.9eb28e6fd3caef0514628f72efd96120.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733173709321"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733173709321"}]},"ts":"1733173709321"} 2024-12-02T21:08:29,323 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
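Editor's note: the two TableDescriptorChecker warnings above fire because the test shrinks MAX_FILESIZE and MEMSTORE_FLUSHSIZE far below production values to force frequent flushes and rolls. A hedged sketch of how a descriptor ends up with such values via the 2.x builder API; the table name matches the one being created in the log, but the snippet is illustrative rather than the test's actual code.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class TinyTableDescriptorSketch {
    public static TableDescriptor build() {
        return TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                // 768 KB max region size: triggers the "might cause over splitting" warning.
                .setMaxFileSize(786432L)
                // 8 KB memstore flush size: triggers the "very frequent flushing" warning.
                .setMemStoreFlushSize(8192L)
                .build();
    }
}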
2024-12-02T21:08:29,325 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T21:08:29,325 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733173709325"}]},"ts":"1733173709325"} 2024-12-02T21:08:29,326 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-12-02T21:08:29,346 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=9eb28e6fd3caef0514628f72efd96120, ASSIGN}] 2024-12-02T21:08:29,348 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=9eb28e6fd3caef0514628f72efd96120, ASSIGN 2024-12-02T21:08:29,349 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=9eb28e6fd3caef0514628f72efd96120, ASSIGN; state=OFFLINE, location=7d4f3b9a7081,46239,1733173707323; forceNewPlan=false, retain=false 2024-12-02T21:08:29,398 INFO [RS:1;7d4f3b9a7081:42679 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7d4f3b9a7081%2C42679%2C1733173709184, suffix=, logDir=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,42679,1733173709184, archiveDir=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/oldWALs, maxLogs=32 2024-12-02T21:08:29,400 INFO [RS:1;7d4f3b9a7081:42679 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C42679%2C1733173709184.1733173709400 2024-12-02T21:08:29,409 INFO [RS:1;7d4f3b9a7081:42679 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,42679,1733173709184/7d4f3b9a7081%2C42679%2C1733173709184.1733173709400 2024-12-02T21:08:29,409 DEBUG [RS:1;7d4f3b9a7081:42679 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36911:36911),(127.0.0.1/127.0.0.1:39689:39689)] 2024-12-02T21:08:29,501 INFO [7d4f3b9a7081:36403 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 
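The WAL created above for 7d4f3b9a7081,42679 reports blocksize=256 MB, rollsize=128 MB and maxLogs=32, i.e. the writer rolls once it reaches half of the configured block size and the region server keeps at most 32 live WAL files. Those figures are normally driven by configuration roughly as in the sketch below; the property names are the ones commonly used on branch-2 and should be verified against the version on the classpath rather than read as this test's actual setup.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Block size used for WAL files on HDFS ("blocksize=256 MB" above).
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        // Roll when the WAL reaches blocksize * multiplier (0.5 gives rollsize=128 MB).
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // Upper bound on live WAL files per region server ("maxLogs=32" above).
        conf.setInt("hbase.regionserver.maxlogs", 32);
        long rollSize = (long) (conf.getLong("hbase.regionserver.hlog.blocksize", 0)
            * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f));
        System.out.println("effective rollsize=" + rollSize);
      }
    }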
2024-12-02T21:08:29,501 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=9eb28e6fd3caef0514628f72efd96120, regionState=OPENING, regionLocation=7d4f3b9a7081,46239,1733173707323 2024-12-02T21:08:29,503 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 9eb28e6fd3caef0514628f72efd96120, server=7d4f3b9a7081,46239,1733173707323}] 2024-12-02T21:08:29,657 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7d4f3b9a7081,46239,1733173707323 2024-12-02T21:08:29,666 INFO [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRollOnDatanodeDeath,,1733173709298.9eb28e6fd3caef0514628f72efd96120. 2024-12-02T21:08:29,667 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 9eb28e6fd3caef0514628f72efd96120, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733173709298.9eb28e6fd3caef0514628f72efd96120.', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:08:29,668 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 9eb28e6fd3caef0514628f72efd96120 2024-12-02T21:08:29,668 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733173709298.9eb28e6fd3caef0514628f72efd96120.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:08:29,668 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 9eb28e6fd3caef0514628f72efd96120 2024-12-02T21:08:29,668 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 9eb28e6fd3caef0514628f72efd96120 2024-12-02T21:08:29,671 INFO [StoreOpener-9eb28e6fd3caef0514628f72efd96120-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 9eb28e6fd3caef0514628f72efd96120 2024-12-02T21:08:29,673 INFO [StoreOpener-9eb28e6fd3caef0514628f72efd96120-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9eb28e6fd3caef0514628f72efd96120 columnFamilyName info 2024-12-02T21:08:29,673 DEBUG [StoreOpener-9eb28e6fd3caef0514628f72efd96120-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:08:29,674 INFO [StoreOpener-9eb28e6fd3caef0514628f72efd96120-1 {}] regionserver.HStore(327): Store=9eb28e6fd3caef0514628f72efd96120/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:08:29,675 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9eb28e6fd3caef0514628f72efd96120 2024-12-02T21:08:29,676 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9eb28e6fd3caef0514628f72efd96120 2024-12-02T21:08:29,679 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 9eb28e6fd3caef0514628f72efd96120 2024-12-02T21:08:29,682 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9eb28e6fd3caef0514628f72efd96120/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:08:29,682 INFO [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 9eb28e6fd3caef0514628f72efd96120; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=851692, jitterRate=0.08298362791538239}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T21:08:29,683 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 9eb28e6fd3caef0514628f72efd96120: 2024-12-02T21:08:29,684 INFO [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1733173709298.9eb28e6fd3caef0514628f72efd96120., pid=11, masterSystemTime=1733173709657 2024-12-02T21:08:29,686 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1733173709298.9eb28e6fd3caef0514628f72efd96120. 2024-12-02T21:08:29,686 INFO [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1733173709298.9eb28e6fd3caef0514628f72efd96120. 
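At this point region 9eb28e6fd3caef0514628f72efd96120 has been opened on 7d4f3b9a7081,46239 with no recovered edits and next sequenceid=2, and the OpenRegionProcedure is about to report back to the master. A client waiting for the new table could confirm the assignment roughly as sketched here; this is hypothetical verification code for illustration, not part of the test:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class WaitForTableOnline {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin();
             RegionLocator locator = conn.getRegionLocator(tn)) {
          // Block until every region of the table is assigned and the table is enabled.
          while (!admin.isTableAvailable(tn)) {
            Thread.sleep(100);
          }
          // The log above shows a single region spanning ('', ''); print where it landed.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " on " + loc.getServerName());
          }
        }
      }
    }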
2024-12-02T21:08:29,687 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=9eb28e6fd3caef0514628f72efd96120, regionState=OPEN, openSeqNum=2, regionLocation=7d4f3b9a7081,46239,1733173707323 2024-12-02T21:08:29,691 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-02T21:08:29,691 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 9eb28e6fd3caef0514628f72efd96120, server=7d4f3b9a7081,46239,1733173707323 in 186 msec 2024-12-02T21:08:29,693 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-02T21:08:29,693 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=9eb28e6fd3caef0514628f72efd96120, ASSIGN in 345 msec 2024-12-02T21:08:29,694 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T21:08:29,695 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733173709694"}]},"ts":"1733173709694"} 2024-12-02T21:08:29,696 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-12-02T21:08:29,740 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T21:08:29,745 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 443 msec 2024-12-02T21:08:30,090 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:08:30,096 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:08:30,610 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-02T21:08:30,613 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:08:30,635 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:08:33,693 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-02T21:08:33,695 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-02T21:08:33,698 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-12-02T21:08:34,008 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-02T21:08:34,008 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-12-02T21:08:34,012 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-02T21:08:39,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36403 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-02T21:08:39,306 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath, procId: 9 completed 2024-12-02T21:08:39,313 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-12-02T21:08:39,314 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1733173709298.9eb28e6fd3caef0514628f72efd96120. 2024-12-02T21:08:39,326 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-02T21:08:39,328 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:08:39,351 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:08:39,360 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:08:39,363 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:08:39,364 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:08:39,364 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:08:39,364 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T21:08:39,365 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b5cef5a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:08:39,366 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8232789{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:08:39,458 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4797fed6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/java.io.tmpdir/jetty-localhost-43505-hadoop-hdfs-3_4_1-tests_jar-_-any-17844722948888577759/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:08:39,458 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@76a98872{HTTP/1.1, (http/1.1)}{localhost:43505} 2024-12-02T21:08:39,458 INFO [Time-limited test {}] server.Server(415): Started @130838ms 2024-12-02T21:08:39,460 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:08:39,490 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:08:39,495 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:08:39,496 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:08:39,496 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:08:39,496 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T21:08:39,497 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56b35276{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:08:39,497 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1603b2a2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:08:39,594 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3bfbaffe{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/java.io.tmpdir/jetty-localhost-33481-hadoop-hdfs-3_4_1-tests_jar-_-any-424665320398899339/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:08:39,595 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3fc0b30{HTTP/1.1, (http/1.1)}{localhost:33481} 2024-12-02T21:08:39,595 INFO [Time-limited test {}] server.Server(415): Started @130975ms 2024-12-02T21:08:39,596 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:08:39,624 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:08:39,627 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:08:39,628 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:08:39,628 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:08:39,628 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T21:08:39,629 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11d5ee62{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:08:39,629 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f3582bb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:08:39,720 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@15a5ecfd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/java.io.tmpdir/jetty-localhost-44195-hadoop-hdfs-3_4_1-tests_jar-_-any-10830597422907945777/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:08:39,721 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3c8e07c0{HTTP/1.1, (http/1.1)}{localhost:44195} 2024-12-02T21:08:39,721 INFO [Time-limited test {}] server.Server(415): Started @131101ms 2024-12-02T21:08:39,723 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:08:40,164 WARN [Thread-672 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data6/current/BP-967350915-172.17.0.2-1733173705684/current, will proceed with Du for space computation calculation, 2024-12-02T21:08:40,164 WARN [Thread-671 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data5/current/BP-967350915-172.17.0.2-1733173705684/current, will proceed with Du for space computation calculation, 2024-12-02T21:08:40,184 WARN [Thread-613 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T21:08:40,187 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x22a91b2f19eb3ad3 with lease ID 0x89dd5ecfbf8accec: Processing first storage report for DS-3e1de32e-0f5b-40e5-9985-8a8ac62fe152 from datanode DatanodeRegistration(127.0.0.1:34327, datanodeUuid=542ed2d8-794e-48cb-ad49-b619ecac27ec, infoPort=36285, infoSecurePort=0, ipcPort=34775, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684) 2024-12-02T21:08:40,187 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x22a91b2f19eb3ad3 with lease ID 0x89dd5ecfbf8accec: from storage DS-3e1de32e-0f5b-40e5-9985-8a8ac62fe152 node DatanodeRegistration(127.0.0.1:34327, datanodeUuid=542ed2d8-794e-48cb-ad49-b619ecac27ec, infoPort=36285, infoSecurePort=0, ipcPort=34775, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:08:40,187 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x22a91b2f19eb3ad3 with lease ID 0x89dd5ecfbf8accec: Processing first storage report for DS-9e718dd4-a40e-4b3a-98c5-dc2200334c68 from datanode DatanodeRegistration(127.0.0.1:34327, datanodeUuid=542ed2d8-794e-48cb-ad49-b619ecac27ec, infoPort=36285, infoSecurePort=0, ipcPort=34775, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684) 2024-12-02T21:08:40,187 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x22a91b2f19eb3ad3 with lease ID 0x89dd5ecfbf8accec: from storage DS-9e718dd4-a40e-4b3a-98c5-dc2200334c68 node DatanodeRegistration(127.0.0.1:34327, datanodeUuid=542ed2d8-794e-48cb-ad49-b619ecac27ec, infoPort=36285, infoSecurePort=0, ipcPort=34775, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:08:40,324 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data7/current/BP-967350915-172.17.0.2-1733173705684/current, will proceed with Du for space computation calculation, 2024-12-02T21:08:40,324 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data8/current/BP-967350915-172.17.0.2-1733173705684/current, will proceed with Du for space computation calculation, 2024-12-02T21:08:40,350 WARN [Thread-635 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T21:08:40,352 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd983847aac5b4b1c with lease ID 0x89dd5ecfbf8acced: Processing first storage report for DS-dd27ee88-6fd5-46c8-aaad-4c235dc87934 from datanode DatanodeRegistration(127.0.0.1:41471, datanodeUuid=77085c06-509e-408e-bf2e-cee2276d56da, infoPort=35139, infoSecurePort=0, ipcPort=34931, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684) 2024-12-02T21:08:40,352 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd983847aac5b4b1c with lease ID 0x89dd5ecfbf8acced: from storage DS-dd27ee88-6fd5-46c8-aaad-4c235dc87934 node DatanodeRegistration(127.0.0.1:41471, datanodeUuid=77085c06-509e-408e-bf2e-cee2276d56da, infoPort=35139, infoSecurePort=0, ipcPort=34931, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:08:40,352 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd983847aac5b4b1c with lease ID 0x89dd5ecfbf8acced: Processing first storage report for DS-eb8959d4-4d45-478f-b08b-8cf00711ce45 from datanode DatanodeRegistration(127.0.0.1:41471, datanodeUuid=77085c06-509e-408e-bf2e-cee2276d56da, infoPort=35139, infoSecurePort=0, ipcPort=34931, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684) 2024-12-02T21:08:40,352 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd983847aac5b4b1c with lease ID 0x89dd5ecfbf8acced: from storage DS-eb8959d4-4d45-478f-b08b-8cf00711ce45 node DatanodeRegistration(127.0.0.1:41471, datanodeUuid=77085c06-509e-408e-bf2e-cee2276d56da, infoPort=35139, infoSecurePort=0, ipcPort=34931, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:08:40,416 WARN [Thread-695 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data10/current/BP-967350915-172.17.0.2-1733173705684/current, will proceed with Du for space computation calculation, 2024-12-02T21:08:40,416 WARN [Thread-694 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data9/current/BP-967350915-172.17.0.2-1733173705684/current, will proceed with Du for space computation calculation, 2024-12-02T21:08:40,432 WARN [Thread-657 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T21:08:40,434 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x10483c5a5d18bee7 with lease ID 0x89dd5ecfbf8accee: Processing first storage report for DS-6035e3bc-16e6-4f1e-bdd3-3bc5051602af from datanode DatanodeRegistration(127.0.0.1:37629, datanodeUuid=6813777f-477b-4558-9cef-15f971230ace, infoPort=35585, infoSecurePort=0, ipcPort=38151, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684) 2024-12-02T21:08:40,434 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x10483c5a5d18bee7 with lease ID 0x89dd5ecfbf8accee: from storage DS-6035e3bc-16e6-4f1e-bdd3-3bc5051602af node DatanodeRegistration(127.0.0.1:37629, datanodeUuid=6813777f-477b-4558-9cef-15f971230ace, infoPort=35585, infoSecurePort=0, ipcPort=38151, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-02T21:08:40,435 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x10483c5a5d18bee7 with lease ID 0x89dd5ecfbf8accee: Processing first storage report for DS-e5d3e9b9-fc67-4d68-a2f4-2bf2f2c0db0b from datanode DatanodeRegistration(127.0.0.1:37629, datanodeUuid=6813777f-477b-4558-9cef-15f971230ace, infoPort=35585, infoSecurePort=0, ipcPort=38151, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684) 2024-12-02T21:08:40,435 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x10483c5a5d18bee7 with lease ID 0x89dd5ecfbf8accee: from storage DS-e5d3e9b9-fc67-4d68-a2f4-2bf2f2c0db0b node DatanodeRegistration(127.0.0.1:37629, datanodeUuid=6813777f-477b-4558-9cef-15f971230ace, infoPort=35585, infoSecurePort=0, ipcPort=38151, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:08:40,451 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@ad49bc2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:08:40,451 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3a81d4c0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:08:40,451 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:08:40,452 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@108b35f9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:08:40,452 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59c97a81{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/hadoop.log.dir/,STOPPED} 2024-12-02T21:08:40,449 WARN [ResponseProcessor for block BP-967350915-172.17.0.2-1733173705684:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-967350915-172.17.0.2-1733173705684:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-967350915-172.17.0.2-1733173705684:blk_1073741833_1009 from datanode 
DatanodeInfoWithStorage[127.0.0.1:44475,DS-7999ded0-6f63-4ce0-8abb-657329e12700,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:40,449 WARN [ResponseProcessor for block BP-967350915-172.17.0.2-1733173705684:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-967350915-172.17.0.2-1733173705684:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-967350915-172.17.0.2-1733173705684:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:44475,DS-7999ded0-6f63-4ce0-8abb-657329e12700,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:40,449 WARN [ResponseProcessor for block BP-967350915-172.17.0.2-1733173705684:blk_1073741839_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-967350915-172.17.0.2-1733173705684:blk_1073741839_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:40,449 WARN [ResponseProcessor for block BP-967350915-172.17.0.2-1733173705684:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-967350915-172.17.0.2-1733173705684:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:40,455 WARN [BP-967350915-172.17.0.2-1733173705684 heartbeating to localhost/127.0.0.1:40413 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:08:40,455 WARN [DataStreamer for file /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173707842 block BP-967350915-172.17.0.2-1733173705684:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-967350915-172.17.0.2-1733173705684:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK], DatanodeInfoWithStorage[127.0.0.1:44475,DS-7999ded0-6f63-4ce0-8abb-657329e12700,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44475,DS-7999ded0-6f63-4ce0-8abb-657329e12700,DISK]) is bad. 
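The ResponseProcessor warnings above mean the HDFS client has detected a failed datanode (127.0.0.1:44475) in the write pipelines of the master WAL, the meta WAL and the region server WALs, and DataStreamer begins error recovery for each affected block. How aggressively a client repairs such a pipeline is controlled by the standard replace-datanode-on-failure settings; the sketch below shows those client-side knobs with illustrative values, not the values this test actually uses.

    import org.apache.hadoop.conf.Configuration;

    public class PipelineRecoveryConfig {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Allow the client to swap a replacement datanode into a failed pipeline.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // DEFAULT only replaces nodes for sufficiently replicated, long-lived pipelines;
        // ALWAYS and NEVER are the other standard policies.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // With best-effort enabled the writer continues on the surviving datanodes
        // when no replacement can be found, which matters on small mini-clusters.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
      }
    }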
2024-12-02T21:08:40,454 WARN [DataStreamer for file /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/WALs/7d4f3b9a7081,36403,1733173707177/7d4f3b9a7081%2C36403%2C1733173707177.1733173707435 block BP-967350915-172.17.0.2-1733173705684:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-967350915-172.17.0.2-1733173705684:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK], DatanodeInfoWithStorage[127.0.0.1:44475,DS-7999ded0-6f63-4ce0-8abb-657329e12700,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44475,DS-7999ded0-6f63-4ce0-8abb-657329e12700,DISK]) is bad. 2024-12-02T21:08:40,454 WARN [DataStreamer for file /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta block BP-967350915-172.17.0.2-1733173705684:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-967350915-172.17.0.2-1733173705684:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44475,DS-7999ded0-6f63-4ce0-8abb-657329e12700,DISK], DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44475,DS-7999ded0-6f63-4ce0-8abb-657329e12700,DISK]) is bad. 2024-12-02T21:08:40,455 WARN [DataStreamer for file /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,42679,1733173709184/7d4f3b9a7081%2C42679%2C1733173709184.1733173709400 block BP-967350915-172.17.0.2-1733173705684:blk_1073741839_1015 {}] hdfs.DataStreamer(1731): Error Recovery for BP-967350915-172.17.0.2-1733173705684:blk_1073741839_1015 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44475,DS-7999ded0-6f63-4ce0-8abb-657329e12700,DISK], DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44475,DS-7999ded0-6f63-4ce0-8abb-657329e12700,DISK]) is bad. 2024-12-02T21:08:40,455 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-02T21:08:40,455 WARN [BP-967350915-172.17.0.2-1733173705684 heartbeating to localhost/127.0.0.1:40413 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-967350915-172.17.0.2-1733173705684 (Datanode Uuid e2a33afa-21be-4db5-98c1-a8e5ef378ea1) service to localhost/127.0.0.1:40413 2024-12-02T21:08:40,455 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:08:40,455 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2003058785_22 at /127.0.0.1:44866 [Receiving block BP-967350915-172.17.0.2-1733173705684:blk_1073741839_1015] {}] datanode.DataXceiver(331): 127.0.0.1:42239:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44866 dst: /127.0.0.1:42239 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:08:40,454 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-850107725_22 at /127.0.0.1:57440 [Receiving block BP-967350915-172.17.0.2-1733173705684:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44475:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57440 dst: /127.0.0.1:44475 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[closed]. Total timeout mills is 60000, 49243 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
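The DataXceiver errors on 127.0.0.1:42239 and 127.0.0.1:44475 (Premature EOF and interrupted I/O while receiving WRITE_BLOCK) are the datanode-side view of the same event: a datanode in the mini-cluster has gone away while WAL blocks were still being written, which is the failure this test is named for. In a MiniDFSCluster-based test that kind of death is typically injected along the lines of the sketch below, assuming the testing utility exposes the mini DFS cluster; treat it as an illustration rather than the test's exact code.

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class KillOneDatanode {
      public static void stopAndRestartDatanode(HBaseTestingUtility util) throws Exception {
        MiniDFSCluster dfs = util.getDFSCluster();
        // Stop datanode 0 without shutting down the rest of the cluster; writers that
        // have it in their pipelines see the EOF / connection errors logged above.
        MiniDFSCluster.DataNodeProperties dnProps = dfs.stopDataNode(0);
        // Later, bring the same datanode back if the scenario calls for it.
        dfs.restartDataNode(dnProps);
        dfs.waitActive();
      }
    }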
2024-12-02T21:08:40,455 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-850107725_22 at /127.0.0.1:44816 [Receiving block BP-967350915-172.17.0.2-1733173705684:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:42239:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44816 dst: /127.0.0.1:42239 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:08:40,456 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data3/current/BP-967350915-172.17.0.2-1733173705684 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:08:40,455 WARN [PacketResponder: BP-967350915-172.17.0.2-1733173705684:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:44475] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:08:40,456 WARN [PacketResponder: BP-967350915-172.17.0.2-1733173705684:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:44475] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:08:40,456 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data4/current/BP-967350915-172.17.0.2-1733173705684 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:08:40,457 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:08:40,454 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-937202529_22 at /127.0.0.1:38980 [Receiving block BP-967350915-172.17.0.2-1733173705684:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44475:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38980 dst: /127.0.0.1:44475 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[closed]. Total timeout mills is 60000, 49291 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:08:40,454 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-850107725_22 at /127.0.0.1:39012 [Receiving block BP-967350915-172.17.0.2-1733173705684:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44475:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39012 dst: /127.0.0.1:44475 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[closed]. Total timeout mills is 60000, 49231 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:08:40,456 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-937202529_22 at /127.0.0.1:56102 [Receiving block BP-967350915-172.17.0.2-1733173705684:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:42239:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56102 dst: /127.0.0.1:42239 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:08:40,457 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-850107725_22 at /127.0.0.1:56132 [Receiving block BP-967350915-172.17.0.2-1733173705684:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:42239:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56132 dst: /127.0.0.1:42239 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:08:40,455 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2003058785_22 at /127.0.0.1:57488 [Receiving block BP-967350915-172.17.0.2-1733173705684:blk_1073741839_1015] {}] datanode.DataXceiver(331): 127.0.0.1:44475:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57488 dst: /127.0.0.1:44475 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[closed]. Total timeout mills is 60000, 48954 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:08:40,461 WARN [DataStreamer for file /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,42679,1733173709184/7d4f3b9a7081%2C42679%2C1733173709184.1733173709400 block BP-967350915-172.17.0.2-1733173705684:blk_1073741839_1015 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1015 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:40,461 WARN [DataStreamer for file /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/WALs/7d4f3b9a7081,36403,1733173707177/7d4f3b9a7081%2C36403%2C1733173707177.1733173707435 block BP-967350915-172.17.0.2-1733173705684:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:08:40,464 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2f925d25{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:08:40,464 WARN [DataStreamer for file /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta block BP-967350915-172.17.0.2-1733173705684:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:40,464 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1c800c5d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:08:40,464 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:08:40,464 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3efea70d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:08:40,464 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b019b40{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/hadoop.log.dir/,STOPPED} 2024-12-02T21:08:40,466 WARN [BP-967350915-172.17.0.2-1733173705684 heartbeating to localhost/127.0.0.1:40413 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:08:40,466 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T21:08:40,466 WARN [BP-967350915-172.17.0.2-1733173705684 heartbeating to localhost/127.0.0.1:40413 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-967350915-172.17.0.2-1733173705684 (Datanode Uuid e5e7676b-015b-44d3-a217-81e86e95d876) service to localhost/127.0.0.1:40413 2024-12-02T21:08:40,466 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:08:40,466 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data1/current/BP-967350915-172.17.0.2-1733173705684 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:08:40,466 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data2/current/BP-967350915-172.17.0.2-1733173705684 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:08:40,466 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:08:40,468 WARN [DataStreamer for file /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173707842 block BP-967350915-172.17.0.2-1733173705684:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:40,470 WARN [RS:0;7d4f3b9a7081:46239.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=4, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:40,471 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 7d4f3b9a7081%2C46239%2C1733173707323:(num 1733173707842) roll requested 2024-12-02T21:08:40,471 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C46239%2C1733173707323.1733173720471 2024-12-02T21:08:40,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=4, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:40,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:49650 deadline: 1733173730470, exception=org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=4, requesting roll of WAL 2024-12-02T21:08:40,474 WARN [Thread-705 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1020 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:40,474 WARN [Thread-705 {}] hdfs.DataStreamer(1731): Error Recovery for BP-967350915-172.17.0.2-1733173705684:blk_1073741840_1020 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44475,DS-7999ded0-6f63-4ce0-8abb-657329e12700,DISK], DatanodeInfoWithStorage[127.0.0.1:37629,DS-6035e3bc-16e6-4f1e-bdd3-3bc5051602af,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44475,DS-7999ded0-6f63-4ce0-8abb-657329e12700,DISK]) is bad. 2024-12-02T21:08:40,474 WARN [Thread-705 {}] hdfs.DataStreamer(1850): Abandoning BP-967350915-172.17.0.2-1733173705684:blk_1073741840_1020 2024-12-02T21:08:40,476 WARN [Thread-705 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44475,DS-7999ded0-6f63-4ce0-8abb-657329e12700,DISK] 2024-12-02T21:08:40,483 WARN [regionserver/7d4f3b9a7081:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=4, requesting roll of WAL 2024-12-02T21:08:40,483 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173707842 with entries=4, filesize=959 B; new WAL /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173720471 2024-12-02T21:08:40,483 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35139:35139),(127.0.0.1/127.0.0.1:36285:36285)] 2024-12-02T21:08:40,483 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173707842 is not closed yet, will try archiving it next time 2024-12-02T21:08:40,483 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:40,483 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:40,484 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-02T21:08:40,485 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-02T21:08:40,485 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173707842 2024-12-02T21:08:40,488 WARN [IPC Server handler 2 on default port 40413 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173707842 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741833_1009 2024-12-02T21:08:40,489 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173707842 after 4ms 2024-12-02T21:08:44,492 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173707842 after 4007ms 2024-12-02T21:08:52,583 INFO [Time-limited test {}] wal.TestLogRolling(243): log.getCurrentFileName(): hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173720471 2024-12-02T21:08:52,584 WARN [ResponseProcessor for block BP-967350915-172.17.0.2-1733173705684:blk_1073741841_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-967350915-172.17.0.2-1733173705684:blk_1073741841_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:08:52,585 WARN [DataStreamer for file /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173720471 block BP-967350915-172.17.0.2-1733173705684:blk_1073741841_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-967350915-172.17.0.2-1733173705684:blk_1073741841_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41471,DS-dd27ee88-6fd5-46c8-aaad-4c235dc87934,DISK], DatanodeInfoWithStorage[127.0.0.1:34327,DS-3e1de32e-0f5b-40e5-9985-8a8ac62fe152,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41471,DS-dd27ee88-6fd5-46c8-aaad-4c235dc87934,DISK]) is bad. 2024-12-02T21:08:52,587 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-850107725_22 at /127.0.0.1:51662 [Receiving block BP-967350915-172.17.0.2-1733173705684:blk_1073741841_1021] {}] datanode.DataXceiver(331): 127.0.0.1:41471:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51662 dst: /127.0.0.1:41471 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T21:08:52,587 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-850107725_22 at /127.0.0.1:44638 [Receiving block BP-967350915-172.17.0.2-1733173705684:blk_1073741841_1021] {}] datanode.DataXceiver(331): 127.0.0.1:34327:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44638 dst: /127.0.0.1:34327 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:08:52,636 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3bfbaffe{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:08:52,637 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3fc0b30{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:08:52,637 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:08:52,637 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1603b2a2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:08:52,638 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56b35276{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/hadoop.log.dir/,STOPPED} 2024-12-02T21:08:52,642 WARN [BP-967350915-172.17.0.2-1733173705684 heartbeating to localhost/127.0.0.1:40413 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:08:52,642 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T21:08:52,642 WARN [BP-967350915-172.17.0.2-1733173705684 heartbeating to localhost/127.0.0.1:40413 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-967350915-172.17.0.2-1733173705684 (Datanode Uuid 77085c06-509e-408e-bf2e-cee2276d56da) service to localhost/127.0.0.1:40413 2024-12-02T21:08:52,642 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:08:52,643 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data7/current/BP-967350915-172.17.0.2-1733173705684 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:08:52,643 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data8/current/BP-967350915-172.17.0.2-1733173705684 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:08:52,643 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:08:52,646 WARN [sync.1 {}] wal.FSHLog(750): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34327,DS-3e1de32e-0f5b-40e5-9985-8a8ac62fe152,DISK]] 2024-12-02T21:08:52,646 WARN [sync.1 {}] wal.FSHLog(721): Requesting log roll because of low replication, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34327,DS-3e1de32e-0f5b-40e5-9985-8a8ac62fe152,DISK]] 2024-12-02T21:08:52,646 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 7d4f3b9a7081%2C46239%2C1733173707323:(num 1733173720471) roll requested 2024-12-02T21:08:52,646 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C46239%2C1733173707323.1733173732646 2024-12-02T21:08:52,650 WARN [Thread-714 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1024 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41471 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:08:52,650 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-850107725_22 at /127.0.0.1:50022 [Receiving block BP-967350915-172.17.0.2-1733173705684:blk_1073741842_1024] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data10]'}, localName='127.0.0.1:37629', datanodeUuid='6813777f-477b-4558-9cef-15f971230ace', xmitsInProgress=0}:Exception transferring block BP-967350915-172.17.0.2-1733173705684:blk_1073741842_1024 to mirror 127.0.0.1:41471 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:08:52,650 WARN [Thread-714 {}] hdfs.DataStreamer(1731): Error Recovery for BP-967350915-172.17.0.2-1733173705684:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37629,DS-6035e3bc-16e6-4f1e-bdd3-3bc5051602af,DISK], DatanodeInfoWithStorage[127.0.0.1:41471,DS-dd27ee88-6fd5-46c8-aaad-4c235dc87934,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41471,DS-dd27ee88-6fd5-46c8-aaad-4c235dc87934,DISK]) is bad. 2024-12-02T21:08:52,650 WARN [Thread-714 {}] hdfs.DataStreamer(1850): Abandoning BP-967350915-172.17.0.2-1733173705684:blk_1073741842_1024 2024-12-02T21:08:52,651 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-850107725_22 at /127.0.0.1:50022 [Receiving block BP-967350915-172.17.0.2-1733173705684:blk_1073741842_1024] {}] datanode.BlockReceiver(316): Block 1073741842 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-02T21:08:52,651 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-850107725_22 at /127.0.0.1:50022 [Receiving block BP-967350915-172.17.0.2-1733173705684:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:37629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50022 dst: /127.0.0.1:37629 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:08:52,651 WARN [Thread-714 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41471,DS-dd27ee88-6fd5-46c8-aaad-4c235dc87934,DISK] 2024-12-02T21:08:52,652 WARN [Thread-714 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:52,653 WARN [Thread-714 {}] hdfs.DataStreamer(1731): Error Recovery for BP-967350915-172.17.0.2-1733173705684:blk_1073741843_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44475,DS-7999ded0-6f63-4ce0-8abb-657329e12700,DISK], DatanodeInfoWithStorage[127.0.0.1:34327,DS-3e1de32e-0f5b-40e5-9985-8a8ac62fe152,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44475,DS-7999ded0-6f63-4ce0-8abb-657329e12700,DISK]) is bad. 
2024-12-02T21:08:52,653 WARN [Thread-714 {}] hdfs.DataStreamer(1850): Abandoning BP-967350915-172.17.0.2-1733173705684:blk_1073741843_1025 2024-12-02T21:08:52,653 WARN [Thread-714 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44475,DS-7999ded0-6f63-4ce0-8abb-657329e12700,DISK] 2024-12-02T21:08:52,659 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173720471 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173732646 2024-12-02T21:08:52,659 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36285:36285),(127.0.0.1/127.0.0.1:35585:35585)] 2024-12-02T21:08:52,659 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173707842 is not closed yet, will try archiving it next time 2024-12-02T21:08:52,659 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173720471 is not closed yet, will try archiving it next time 2024-12-02T21:08:52,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34327 is added to blk_1073741841_1023 (size=2431) 2024-12-02T21:08:53,063 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(751): hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173707842 is not closed yet, will try archiving it next time 2024-12-02T21:08:53,201 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1500102[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34327, datanodeUuid=542ed2d8-794e-48cb-ad49-b619ecac27ec, infoPort=36285, infoSecurePort=0, ipcPort=34775, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684):Failed to transfer BP-967350915-172.17.0.2-1733173705684:blk_1073741841_1023 to 127.0.0.1:42239 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T21:08:56,652 WARN [ResponseProcessor for block BP-967350915-172.17.0.2-1733173705684:blk_1073741844_1026 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-967350915-172.17.0.2-1733173705684:blk_1073741844_1026 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:56,654 WARN [DataStreamer for file /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173732646 block BP-967350915-172.17.0.2-1733173705684:blk_1073741844_1026 {}] hdfs.DataStreamer(1731): Error Recovery for BP-967350915-172.17.0.2-1733173705684:blk_1073741844_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34327,DS-3e1de32e-0f5b-40e5-9985-8a8ac62fe152,DISK], DatanodeInfoWithStorage[127.0.0.1:37629,DS-6035e3bc-16e6-4f1e-bdd3-3bc5051602af,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34327,DS-3e1de32e-0f5b-40e5-9985-8a8ac62fe152,DISK]) is bad. 2024-12-02T21:08:56,654 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-850107725_22 at /127.0.0.1:51556 [Receiving block BP-967350915-172.17.0.2-1733173705684:blk_1073741844_1026] {}] datanode.DataXceiver(331): 127.0.0.1:34327:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51556 dst: /127.0.0.1:34327 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:08:56,655 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-850107725_22 at /127.0.0.1:50034 [Receiving block BP-967350915-172.17.0.2-1733173705684:blk_1073741844_1026] {}] datanode.DataXceiver(331): 127.0.0.1:37629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50034 dst: /127.0.0.1:37629 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:08:56,737 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4797fed6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:08:56,738 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@76a98872{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:08:56,739 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:08:56,739 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8232789{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:08:56,739 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b5cef5a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/hadoop.log.dir/,STOPPED} 2024-12-02T21:08:56,743 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T21:08:56,743 WARN [BP-967350915-172.17.0.2-1733173705684 heartbeating to localhost/127.0.0.1:40413 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:08:56,743 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:08:56,743 WARN [BP-967350915-172.17.0.2-1733173705684 heartbeating to localhost/127.0.0.1:40413 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-967350915-172.17.0.2-1733173705684 (Datanode Uuid 542ed2d8-794e-48cb-ad49-b619ecac27ec) service to localhost/127.0.0.1:40413 2024-12-02T21:08:56,744 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data5/current/BP-967350915-172.17.0.2-1733173705684 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:08:56,745 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data6/current/BP-967350915-172.17.0.2-1733173705684 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:08:56,745 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:08:56,747 WARN [sync.4 {}] wal.FSHLog(750): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37629,DS-6035e3bc-16e6-4f1e-bdd3-3bc5051602af,DISK]] 2024-12-02T21:08:56,748 WARN [sync.4 {}] wal.FSHLog(721): Requesting log roll because of low replication, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37629,DS-6035e3bc-16e6-4f1e-bdd3-3bc5051602af,DISK]] 2024-12-02T21:08:56,748 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 7d4f3b9a7081%2C46239%2C1733173707323:(num 1733173732646) roll requested 2024-12-02T21:08:56,748 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C46239%2C1733173707323.1733173736748 2024-12-02T21:08:56,751 WARN [Thread-726 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:08:56,751 WARN [Thread-726 {}] hdfs.DataStreamer(1731): Error Recovery for BP-967350915-172.17.0.2-1733173705684:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41471,DS-dd27ee88-6fd5-46c8-aaad-4c235dc87934,DISK], DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41471,DS-dd27ee88-6fd5-46c8-aaad-4c235dc87934,DISK]) is bad. 2024-12-02T21:08:56,751 WARN [Thread-726 {}] hdfs.DataStreamer(1850): Abandoning BP-967350915-172.17.0.2-1733173705684:blk_1073741845_1028 2024-12-02T21:08:56,752 WARN [Thread-726 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41471,DS-dd27ee88-6fd5-46c8-aaad-4c235dc87934,DISK] 2024-12-02T21:08:56,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 9eb28e6fd3caef0514628f72efd96120 2024-12-02T21:08:56,753 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9eb28e6fd3caef0514628f72efd96120 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-02T21:08:56,755 WARN [Thread-726 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34327 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:56,755 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-850107725_22 at /127.0.0.1:50040 [Receiving block BP-967350915-172.17.0.2-1733173705684:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data10]'}, localName='127.0.0.1:37629', datanodeUuid='6813777f-477b-4558-9cef-15f971230ace', xmitsInProgress=0}:Exception transferring block BP-967350915-172.17.0.2-1733173705684:blk_1073741846_1029 to mirror 127.0.0.1:34327 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:08:56,755 WARN [Thread-726 {}] hdfs.DataStreamer(1731): Error Recovery for BP-967350915-172.17.0.2-1733173705684:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37629,DS-6035e3bc-16e6-4f1e-bdd3-3bc5051602af,DISK], DatanodeInfoWithStorage[127.0.0.1:34327,DS-3e1de32e-0f5b-40e5-9985-8a8ac62fe152,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34327,DS-3e1de32e-0f5b-40e5-9985-8a8ac62fe152,DISK]) is bad. 2024-12-02T21:08:56,755 WARN [Thread-726 {}] hdfs.DataStreamer(1850): Abandoning BP-967350915-172.17.0.2-1733173705684:blk_1073741846_1029 2024-12-02T21:08:56,755 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-850107725_22 at /127.0.0.1:50040 [Receiving block BP-967350915-172.17.0.2-1733173705684:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-02T21:08:56,755 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-850107725_22 at /127.0.0.1:50040 [Receiving block BP-967350915-172.17.0.2-1733173705684:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:37629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50040 dst: /127.0.0.1:37629 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:08:56,756 WARN [Thread-726 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34327,DS-3e1de32e-0f5b-40e5-9985-8a8ac62fe152,DISK] 2024-12-02T21:08:56,759 WARN [Thread-726 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42239 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:56,759 WARN [Thread-726 {}] hdfs.DataStreamer(1731): Error Recovery for BP-967350915-172.17.0.2-1733173705684:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37629,DS-6035e3bc-16e6-4f1e-bdd3-3bc5051602af,DISK], DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK]) is bad. 2024-12-02T21:08:56,759 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-850107725_22 at /127.0.0.1:50052 [Receiving block BP-967350915-172.17.0.2-1733173705684:blk_1073741847_1030] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data10]'}, localName='127.0.0.1:37629', datanodeUuid='6813777f-477b-4558-9cef-15f971230ace', xmitsInProgress=0}:Exception transferring block BP-967350915-172.17.0.2-1733173705684:blk_1073741847_1030 to mirror 127.0.0.1:42239 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:08:56,759 WARN [Thread-726 {}] hdfs.DataStreamer(1850): Abandoning BP-967350915-172.17.0.2-1733173705684:blk_1073741847_1030 2024-12-02T21:08:56,759 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-850107725_22 at /127.0.0.1:50052 [Receiving block BP-967350915-172.17.0.2-1733173705684:blk_1073741847_1030] {}] datanode.BlockReceiver(316): Block 1073741847 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-02T21:08:56,759 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-850107725_22 at /127.0.0.1:50052 [Receiving block BP-967350915-172.17.0.2-1733173705684:blk_1073741847_1030] {}] datanode.DataXceiver(331): 127.0.0.1:37629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50052 dst: /127.0.0.1:37629 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:08:56,760 WARN [Thread-726 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK] 2024-12-02T21:08:56,762 WARN [Thread-726 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:56,762 WARN [Thread-726 {}] hdfs.DataStreamer(1731): Error Recovery for BP-967350915-172.17.0.2-1733173705684:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44475,DS-7999ded0-6f63-4ce0-8abb-657329e12700,DISK], DatanodeInfoWithStorage[127.0.0.1:37629,DS-6035e3bc-16e6-4f1e-bdd3-3bc5051602af,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44475,DS-7999ded0-6f63-4ce0-8abb-657329e12700,DISK]) is bad. 
2024-12-02T21:08:56,762 WARN [Thread-726 {}] hdfs.DataStreamer(1850): Abandoning BP-967350915-172.17.0.2-1733173705684:blk_1073741848_1031 2024-12-02T21:08:56,763 WARN [Thread-726 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44475,DS-7999ded0-6f63-4ce0-8abb-657329e12700,DISK] 2024-12-02T21:08:56,764 WARN [IPC Server handler 1 on default port 40413 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T21:08:56,764 WARN [IPC Server handler 1 on default port 40413 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T21:08:56,764 WARN [IPC Server handler 1 on default port 40413 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T21:08:56,768 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173732646 with entries=12, filesize=12.96 KB; new WAL /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173736748 2024-12-02T21:08:56,768 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35585:35585)] 2024-12-02T21:08:56,768 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173707842 is not closed yet, will try archiving it next time 2024-12-02T21:08:56,769 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173732646 is not closed yet, will try archiving it next time 2024-12-02T21:08:56,769 WARN [sync.0 {}] wal.FSHLog(750): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37629,DS-6035e3bc-16e6-4f1e-bdd3-3bc5051602af,DISK]] 2024-12-02T21:08:56,769 WARN [sync.0 {}] wal.FSHLog(721): Requesting log roll because of low replication, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37629,DS-6035e3bc-16e6-4f1e-bdd3-3bc5051602af,DISK]] 2024-12-02T21:08:56,769 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 7d4f3b9a7081%2C46239%2C1733173707323:(num 1733173736748) roll requested 2024-12-02T21:08:56,769 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C46239%2C1733173707323.1733173736769 2024-12-02T21:08:56,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741844_1027 (size=13275) 2024-12-02T21:08:56,773 WARN [Thread-733 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:56,773 WARN [Thread-733 {}] hdfs.DataStreamer(1731): Error Recovery for BP-967350915-172.17.0.2-1733173705684:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44475,DS-7999ded0-6f63-4ce0-8abb-657329e12700,DISK], DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44475,DS-7999ded0-6f63-4ce0-8abb-657329e12700,DISK]) is bad. 2024-12-02T21:08:56,773 WARN [Thread-733 {}] hdfs.DataStreamer(1850): Abandoning BP-967350915-172.17.0.2-1733173705684:blk_1073741850_1033 2024-12-02T21:08:56,773 WARN [Thread-733 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44475,DS-7999ded0-6f63-4ce0-8abb-657329e12700,DISK] 2024-12-02T21:08:56,774 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9eb28e6fd3caef0514628f72efd96120/.tmp/info/9ffad5eda98f40e486f3488261620b9c is 1080, key is row0002/info:/1733173732644/Put/seqid=0 2024-12-02T21:08:56,775 WARN [Thread-733 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:56,775 WARN [Thread-733 {}] hdfs.DataStreamer(1731): Error Recovery for BP-967350915-172.17.0.2-1733173705684:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41471,DS-dd27ee88-6fd5-46c8-aaad-4c235dc87934,DISK], DatanodeInfoWithStorage[127.0.0.1:37629,DS-6035e3bc-16e6-4f1e-bdd3-3bc5051602af,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41471,DS-dd27ee88-6fd5-46c8-aaad-4c235dc87934,DISK]) is bad. 2024-12-02T21:08:56,775 WARN [Thread-733 {}] hdfs.DataStreamer(1850): Abandoning BP-967350915-172.17.0.2-1733173705684:blk_1073741851_1034 2024-12-02T21:08:56,776 WARN [Thread-733 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41471,DS-dd27ee88-6fd5-46c8-aaad-4c235dc87934,DISK] 2024-12-02T21:08:56,777 WARN [Thread-728 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:56,778 WARN [Thread-728 {}] hdfs.DataStreamer(1731): Error Recovery for BP-967350915-172.17.0.2-1733173705684:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41471,DS-dd27ee88-6fd5-46c8-aaad-4c235dc87934,DISK], DatanodeInfoWithStorage[127.0.0.1:34327,DS-3e1de32e-0f5b-40e5-9985-8a8ac62fe152,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41471,DS-dd27ee88-6fd5-46c8-aaad-4c235dc87934,DISK]) is bad. 2024-12-02T21:08:56,778 WARN [Thread-728 {}] hdfs.DataStreamer(1850): Abandoning BP-967350915-172.17.0.2-1733173705684:blk_1073741852_1035 2024-12-02T21:08:56,778 WARN [Thread-733 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34327 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:56,778 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-850107725_22 at /127.0.0.1:50064 [Receiving block BP-967350915-172.17.0.2-1733173705684:blk_1073741853_1036] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data10]'}, localName='127.0.0.1:37629', datanodeUuid='6813777f-477b-4558-9cef-15f971230ace', xmitsInProgress=0}:Exception transferring block BP-967350915-172.17.0.2-1733173705684:blk_1073741853_1036 to mirror 127.0.0.1:34327 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:08:56,778 WARN [Thread-733 {}] hdfs.DataStreamer(1731): Error Recovery for BP-967350915-172.17.0.2-1733173705684:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37629,DS-6035e3bc-16e6-4f1e-bdd3-3bc5051602af,DISK], DatanodeInfoWithStorage[127.0.0.1:34327,DS-3e1de32e-0f5b-40e5-9985-8a8ac62fe152,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34327,DS-3e1de32e-0f5b-40e5-9985-8a8ac62fe152,DISK]) is bad. 2024-12-02T21:08:56,778 WARN [Thread-728 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41471,DS-dd27ee88-6fd5-46c8-aaad-4c235dc87934,DISK] 2024-12-02T21:08:56,778 WARN [Thread-733 {}] hdfs.DataStreamer(1850): Abandoning BP-967350915-172.17.0.2-1733173705684:blk_1073741853_1036 2024-12-02T21:08:56,778 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-850107725_22 at /127.0.0.1:50064 [Receiving block BP-967350915-172.17.0.2-1733173705684:blk_1073741853_1036] {}] datanode.BlockReceiver(316): Block 1073741853 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-12-02T21:08:56,779 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-850107725_22 at /127.0.0.1:50064 [Receiving block BP-967350915-172.17.0.2-1733173705684:blk_1073741853_1036] {}] datanode.DataXceiver(331): 127.0.0.1:37629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50064 dst: /127.0.0.1:37629 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:08:56,779 WARN [Thread-733 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34327,DS-3e1de32e-0f5b-40e5-9985-8a8ac62fe152,DISK] 2024-12-02T21:08:56,780 WARN [Thread-728 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:56,780 WARN [Thread-728 {}] hdfs.DataStreamer(1731): Error Recovery for BP-967350915-172.17.0.2-1733173705684:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK], DatanodeInfoWithStorage[127.0.0.1:34327,DS-3e1de32e-0f5b-40e5-9985-8a8ac62fe152,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK]) is bad. 2024-12-02T21:08:56,780 WARN [Thread-728 {}] hdfs.DataStreamer(1850): Abandoning BP-967350915-172.17.0.2-1733173705684:blk_1073741854_1037 2024-12-02T21:08:56,780 WARN [Thread-733 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:56,780 WARN [Thread-733 {}] hdfs.DataStreamer(1731): Error Recovery for BP-967350915-172.17.0.2-1733173705684:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK], DatanodeInfoWithStorage[127.0.0.1:37629,DS-6035e3bc-16e6-4f1e-bdd3-3bc5051602af,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK]) is bad. 2024-12-02T21:08:56,780 WARN [Thread-733 {}] hdfs.DataStreamer(1850): Abandoning BP-967350915-172.17.0.2-1733173705684:blk_1073741855_1038 2024-12-02T21:08:56,780 WARN [Thread-728 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK] 2024-12-02T21:08:56,781 WARN [Thread-733 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK] 2024-12-02T21:08:56,781 WARN [IPC Server handler 3 on default port 40413 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T21:08:56,781 WARN [IPC Server handler 3 on default port 40413 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T21:08:56,782 WARN [IPC Server handler 3 on default port 40413 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T21:08:56,782 WARN [Thread-728 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:56,782 WARN [Thread-728 {}] hdfs.DataStreamer(1731): Error Recovery for BP-967350915-172.17.0.2-1733173705684:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34327,DS-3e1de32e-0f5b-40e5-9985-8a8ac62fe152,DISK], DatanodeInfoWithStorage[127.0.0.1:37629,DS-6035e3bc-16e6-4f1e-bdd3-3bc5051602af,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34327,DS-3e1de32e-0f5b-40e5-9985-8a8ac62fe152,DISK]) is bad. 2024-12-02T21:08:56,782 WARN [Thread-728 {}] hdfs.DataStreamer(1850): Abandoning BP-967350915-172.17.0.2-1733173705684:blk_1073741856_1039 2024-12-02T21:08:56,783 WARN [Thread-728 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34327,DS-3e1de32e-0f5b-40e5-9985-8a8ac62fe152,DISK] 2024-12-02T21:08:56,784 WARN [Thread-728 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:56,784 WARN [Thread-728 {}] hdfs.DataStreamer(1731): Error Recovery for BP-967350915-172.17.0.2-1733173705684:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44475,DS-7999ded0-6f63-4ce0-8abb-657329e12700,DISK], DatanodeInfoWithStorage[127.0.0.1:37629,DS-6035e3bc-16e6-4f1e-bdd3-3bc5051602af,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44475,DS-7999ded0-6f63-4ce0-8abb-657329e12700,DISK]) is bad. 
2024-12-02T21:08:56,784 WARN [Thread-728 {}] hdfs.DataStreamer(1850): Abandoning BP-967350915-172.17.0.2-1733173705684:blk_1073741858_1041 2024-12-02T21:08:56,785 WARN [Thread-728 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44475,DS-7999ded0-6f63-4ce0-8abb-657329e12700,DISK] 2024-12-02T21:08:56,786 WARN [IPC Server handler 4 on default port 40413 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T21:08:56,786 WARN [IPC Server handler 4 on default port 40413 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T21:08:56,786 WARN [IPC Server handler 4 on default port 40413 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T21:08:56,789 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173736748 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173736769 2024-12-02T21:08:56,789 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35585:35585)] 2024-12-02T21:08:56,789 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173707842 is not closed yet, will try archiving it next time 2024-12-02T21:08:56,789 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173732646 is not closed yet, will try archiving it next time 2024-12-02T21:08:56,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741859_1042 (size=10347) 2024-12-02T21:08:56,790 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173736748 is not closed yet, will try archiving it next time 2024-12-02T21:08:56,791 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741849_1032 (size=1261) 2024-12-02T21:08:56,973 WARN [sync.2 {}] wal.FSHLog(760): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-12-02T21:08:57,157 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T21:08:57,172 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(751): hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173707842 is not closed yet, will try archiving it next time 2024-12-02T21:08:57,172 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(751): hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173736748 is not closed yet, will try archiving it next time 2024-12-02T21:08:57,187 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:08:57,191 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9eb28e6fd3caef0514628f72efd96120/.tmp/info/9ffad5eda98f40e486f3488261620b9c 2024-12-02T21:08:57,192 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:08:57,192 DEBUG [Close-WAL-Writer-2 {}] wal.AbstractFSWAL(751): hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173707842 is not closed yet, will try archiving it next time 2024-12-02T21:08:57,193 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:08:57,193 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:08:57,193 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T21:08:57,194 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b3886a5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:08:57,194 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4d900df1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:08:57,199 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9eb28e6fd3caef0514628f72efd96120/.tmp/info/9ffad5eda98f40e486f3488261620b9c as hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9eb28e6fd3caef0514628f72efd96120/info/9ffad5eda98f40e486f3488261620b9c 2024-12-02T21:08:57,206 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9eb28e6fd3caef0514628f72efd96120/info/9ffad5eda98f40e486f3488261620b9c, entries=5, sequenceid=12, filesize=10.1 K 2024-12-02T21:08:57,206 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=8.40 KB/8606 for 9eb28e6fd3caef0514628f72efd96120 in 454ms, sequenceid=12, compaction requested=false 2024-12-02T21:08:57,207 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9eb28e6fd3caef0514628f72efd96120: 2024-12-02T21:08:57,285 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7708a86b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/java.io.tmpdir/jetty-localhost-44285-hadoop-hdfs-3_4_1-tests_jar-_-any-15122617198314083523/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:08:57,286 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6fc6c011{HTTP/1.1, (http/1.1)}{localhost:44285} 2024-12-02T21:08:57,286 INFO [Time-limited test {}] server.Server(415): Started @148666ms 2024-12-02T21:08:57,287 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:08:57,612 WARN [Thread-757 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T21:08:57,619 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe972281a85981a68 with lease ID 0x89dd5ecfbf8accef: from storage DS-7999ded0-6f63-4ce0-8abb-657329e12700 node DatanodeRegistration(127.0.0.1:45501, datanodeUuid=e2a33afa-21be-4db5-98c1-a8e5ef378ea1, infoPort=39357, infoSecurePort=0, ipcPort=36689, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684), blocks: 8, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:08:57,619 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe972281a85981a68 with lease ID 0x89dd5ecfbf8accef: from storage DS-7884db6b-68f9-446a-bf88-5889e749b504 node DatanodeRegistration(127.0.0.1:45501, datanodeUuid=e2a33afa-21be-4db5-98c1-a8e5ef378ea1, infoPort=39357, infoSecurePort=0, ipcPort=36689, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-02T21:08:57,619 WARN [master/7d4f3b9a7081:0:becomeActiveMaster.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=96, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:57,619 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(197): WAL FSHLog 7d4f3b9a7081%2C36403%2C1733173707177:(num 1733173707435) roll requested 2024-12-02T21:08:57,620 ERROR [ProcExecTimeout {}] region.RegionProcedureStore(422): Failed to delete pids=[4, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:57,620 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C36403%2C1733173707177.1733173737620 2024-12-02T21:08:57,620 ERROR [ProcExecTimeout {}] procedure2.TimeoutExecutorThread(124): Ignoring pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner exception: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL java.io.UncheckedIOException: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL at org.apache.hadoop.hbase.procedure2.store.region.RegionProcedureStore.delete(RegionProcedureStore.java:423) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner.periodicExecute(CompletedProcedureCleaner.java:135) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.executeInMemoryChore(TimeoutExecutorThread.java:122) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.execDelayedProcedure(TimeoutExecutorThread.java:101) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.run(TimeoutExecutorThread.java:68) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] Caused by: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:57,623 WARN [Thread-775 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:57,623 WARN [Thread-775 {}] hdfs.DataStreamer(1731): Error Recovery for BP-967350915-172.17.0.2-1733173705684:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41471,DS-dd27ee88-6fd5-46c8-aaad-4c235dc87934,DISK], DatanodeInfoWithStorage[127.0.0.1:37629,DS-6035e3bc-16e6-4f1e-bdd3-3bc5051602af,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41471,DS-dd27ee88-6fd5-46c8-aaad-4c235dc87934,DISK]) is bad. 2024-12-02T21:08:57,624 WARN [Thread-775 {}] hdfs.DataStreamer(1850): Abandoning BP-967350915-172.17.0.2-1733173705684:blk_1073741860_1043 2024-12-02T21:08:57,624 WARN [Thread-775 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41471,DS-dd27ee88-6fd5-46c8-aaad-4c235dc87934,DISK] 2024-12-02T21:08:57,629 WARN [master:store-WAL-Roller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL 2024-12-02T21:08:57,630 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/WALs/7d4f3b9a7081,36403,1733173707177/7d4f3b9a7081%2C36403%2C1733173707177.1733173707435 with entries=93, filesize=46.04 KB; new WAL /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/WALs/7d4f3b9a7081,36403,1733173707177/7d4f3b9a7081%2C36403%2C1733173707177.1733173737620 2024-12-02T21:08:57,630 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39357:39357),(127.0.0.1/127.0.0.1:35585:35585)] 2024-12-02T21:08:57,630 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(751): hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/WALs/7d4f3b9a7081,36403,1733173707177/7d4f3b9a7081%2C36403%2C1733173707177.1733173707435 is not closed yet, will try archiving it next time 2024-12-02T21:08:57,630 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:57,630 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:08:57,630 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/WALs/7d4f3b9a7081,36403,1733173707177/7d4f3b9a7081%2C36403%2C1733173707177.1733173707435 2024-12-02T21:08:57,631 WARN [IPC Server handler 4 on default port 40413 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/WALs/7d4f3b9a7081,36403,1733173707177/7d4f3b9a7081%2C36403%2C1733173707177.1733173707435 has not been closed. Lease recovery is in progress. RecoveryId = 1045 for block blk_1073741830_1006 2024-12-02T21:08:57,631 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/WALs/7d4f3b9a7081,36403,1733173707177/7d4f3b9a7081%2C36403%2C1733173707177.1733173707435 after 1ms 2024-12-02T21:08:59,438 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5eec733d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37629, datanodeUuid=6813777f-477b-4558-9cef-15f971230ace, infoPort=35585, infoSecurePort=0, ipcPort=38151, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684):Failed to transfer BP-967350915-172.17.0.2-1733173705684:blk_1073741844_1027 to 127.0.0.1:34327 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T21:08:59,438 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3556b774[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37629, datanodeUuid=6813777f-477b-4558-9cef-15f971230ace, infoPort=35585, infoSecurePort=0, ipcPort=38151, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684):Failed to transfer BP-967350915-172.17.0.2-1733173705684:blk_1073741859_1042 to 127.0.0.1:42239 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:09:00,437 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3556b774[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37629, datanodeUuid=6813777f-477b-4558-9cef-15f971230ace, infoPort=35585, infoSecurePort=0, ipcPort=38151, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684):Failed to transfer BP-967350915-172.17.0.2-1733173705684:blk_1073741849_1032 to 127.0.0.1:42239 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T21:09:01,633 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/WALs/7d4f3b9a7081,36403,1733173707177/7d4f3b9a7081%2C36403%2C1733173707177.1733173707435 after 4002ms 2024-12-02T21:09:01,673 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T21:09:01,674 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40990, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T21:09:02,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741838_1014 (size=76) 2024-12-02T21:09:02,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741836_1012 (size=42) 2024-12-02T21:09:03,240 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T21:09:03,244 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41004, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T21:09:03,619 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1d10fd5f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45501, datanodeUuid=e2a33afa-21be-4db5-98c1-a8e5ef378ea1, infoPort=39357, infoSecurePort=0, ipcPort=36689, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684):Failed to transfer BP-967350915-172.17.0.2-1733173705684:blk_1073741832_1008 to 127.0.0.1:41471 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:09:03,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741828_1004 (size=1189) 2024-12-02T21:09:05,620 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@25118eaf[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45501, datanodeUuid=e2a33afa-21be-4db5-98c1-a8e5ef378ea1, infoPort=39357, infoSecurePort=0, ipcPort=36689, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684):Failed to transfer BP-967350915-172.17.0.2-1733173705684:blk_1073741829_1005 to 127.0.0.1:34327 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:09:05,620 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1d10fd5f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45501, datanodeUuid=e2a33afa-21be-4db5-98c1-a8e5ef378ea1, infoPort=39357, infoSecurePort=0, ipcPort=36689, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684):Failed to transfer BP-967350915-172.17.0.2-1733173705684:blk_1073741827_1003 to 127.0.0.1:34327 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:09:06,618 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1d10fd5f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45501, datanodeUuid=e2a33afa-21be-4db5-98c1-a8e5ef378ea1, infoPort=39357, infoSecurePort=0, ipcPort=36689, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684):Failed to transfer BP-967350915-172.17.0.2-1733173705684:blk_1073741825_1001 to 127.0.0.1:34327 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T21:09:07,634 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@14cfbac1 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-967350915-172.17.0.2-1733173705684:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:42239,null,null]) java.net.ConnectException: Call From 7d4f3b9a7081/172.17.0.2 to localhost:39531 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-02T21:09:07,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45501 is added to blk_1073741833_1022 (size=959) 2024-12-02T21:09:08,534 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173720471 to hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/oldWALs/7d4f3b9a7081%2C46239%2C1733173707323.1733173720471 2024-12-02T21:09:08,621 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1d10fd5f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45501, datanodeUuid=e2a33afa-21be-4db5-98c1-a8e5ef378ea1, infoPort=39357, infoSecurePort=0, ipcPort=36689, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684):Failed to transfer BP-967350915-172.17.0.2-1733173705684:blk_1073741826_1002 to 127.0.0.1:41471 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:09:08,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741837_1013 (size=393) 2024-12-02T21:09:09,536 INFO [master/7d4f3b9a7081:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-02T21:09:09,536 INFO [master/7d4f3b9a7081:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-02T21:09:09,621 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@25118eaf[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45501, datanodeUuid=e2a33afa-21be-4db5-98c1-a8e5ef378ea1, infoPort=39357, infoSecurePort=0, ipcPort=36689, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684):Failed to transfer BP-967350915-172.17.0.2-1733173705684:blk_1073741835_1011 to 127.0.0.1:34327 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:09:09,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741831_1007 (size=1039) 2024-12-02T21:09:11,618 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1d10fd5f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45501, datanodeUuid=e2a33afa-21be-4db5-98c1-a8e5ef378ea1, infoPort=39357, infoSecurePort=0, ipcPort=36689, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684):Failed to transfer BP-967350915-172.17.0.2-1733173705684:blk_1073741833_1022 to 127.0.0.1:41471 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:09:14,668 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 9eb28e6fd3caef0514628f72efd96120, had cached 0 bytes from a total of 10347 2024-12-02T21:09:16,270 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C46239%2C1733173707323.1733173756270 2024-12-02T21:09:16,274 WARN [Thread-795 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:09:16,274 WARN [Thread-795 {}] hdfs.DataStreamer(1731): Error Recovery for BP-967350915-172.17.0.2-1733173705684:blk_1073741862_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34327,DS-3e1de32e-0f5b-40e5-9985-8a8ac62fe152,DISK], DatanodeInfoWithStorage[127.0.0.1:41471,DS-dd27ee88-6fd5-46c8-aaad-4c235dc87934,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34327,DS-3e1de32e-0f5b-40e5-9985-8a8ac62fe152,DISK]) is bad. 2024-12-02T21:09:16,274 WARN [Thread-795 {}] hdfs.DataStreamer(1850): Abandoning BP-967350915-172.17.0.2-1733173705684:blk_1073741862_1046 2024-12-02T21:09:16,275 WARN [Thread-795 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34327,DS-3e1de32e-0f5b-40e5-9985-8a8ac62fe152,DISK] 2024-12-02T21:09:16,283 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173736769 with entries=2, filesize=1.57 KB; new WAL /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173756270 2024-12-02T21:09:16,283 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39357:39357),(127.0.0.1/127.0.0.1:35585:35585)] 2024-12-02T21:09:16,283 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173736769 is not closed yet, will try archiving it next time 2024-12-02T21:09:16,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741857_1040 (size=1618) 2024-12-02T21:09:16,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 9eb28e6fd3caef0514628f72efd96120 2024-12-02T21:09:16,285 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9eb28e6fd3caef0514628f72efd96120 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-12-02T21:09:16,286 INFO [sync.1 {}] wal.FSHLog(777): LowReplication-Roller was enabled. 
2024-12-02T21:09:16,291 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9eb28e6fd3caef0514628f72efd96120/.tmp/info/e99eada342d548d08a0eb51f365535e4 is 1080, key is row0007/info:/1733173736754/Put/seqid=0 2024-12-02T21:09:16,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45501 is added to blk_1073741864_1048 (size=13583) 2024-12-02T21:09:16,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741864_1048 (size=13583) 2024-12-02T21:09:16,298 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9eb28e6fd3caef0514628f72efd96120/.tmp/info/e99eada342d548d08a0eb51f365535e4 2024-12-02T21:09:16,301 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-02T21:09:16,302 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-02T21:09:16,302 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x658b1d03 to 127.0.0.1:58708 2024-12-02T21:09:16,302 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:09:16,302 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-02T21:09:16,302 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=776946617, stopped=false 2024-12-02T21:09:16,302 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=7d4f3b9a7081,36403,1733173707177 2024-12-02T21:09:16,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9eb28e6fd3caef0514628f72efd96120/.tmp/info/e99eada342d548d08a0eb51f365535e4 as hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9eb28e6fd3caef0514628f72efd96120/info/e99eada342d548d08a0eb51f365535e4 2024-12-02T21:09:16,315 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9eb28e6fd3caef0514628f72efd96120/info/e99eada342d548d08a0eb51f365535e4, entries=8, sequenceid=24, filesize=13.3 K 2024-12-02T21:09:16,316 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~9.46 KB/9682, heapSize ~10.36 KB/10608, currentSize=9.46 KB/9684 for 9eb28e6fd3caef0514628f72efd96120 in 31ms, sequenceid=24, compaction requested=false 2024-12-02T21:09:16,316 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9eb28e6fd3caef0514628f72efd96120: 2024-12-02T21:09:16,316 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=23.4 K, sizeToCheck=16.0 K 2024-12-02T21:09:16,316 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): 
regionsWithCommonTable=1 2024-12-02T21:09:16,316 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9eb28e6fd3caef0514628f72efd96120/info/e99eada342d548d08a0eb51f365535e4 because midkey is the same as first or last row 2024-12-02T21:09:16,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T21:09:16,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42679-0x1019929a40e0003, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T21:09:16,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x1019929a40e0001, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T21:09:16,319 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-02T21:09:16,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:09:16,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42679-0x1019929a40e0003, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:09:16,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x1019929a40e0001, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:09:16,319 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:09:16,320 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7d4f3b9a7081,46239,1733173707323' ***** 2024-12-02T21:09:16,320 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-02T21:09:16,320 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7d4f3b9a7081,42679,1733173709184' ***** 2024-12-02T21:09:16,320 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-02T21:09:16,320 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:09:16,320 INFO [RS:1;7d4f3b9a7081:42679 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T21:09:16,320 INFO [RS:1;7d4f3b9a7081:42679 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T21:09:16,320 INFO [RS:1;7d4f3b9a7081:42679 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-02T21:09:16,320 INFO [RS:0;7d4f3b9a7081:46239 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T21:09:16,320 INFO [RS:1;7d4f3b9a7081:42679 {}] regionserver.HRegionServer(1224): stopping server 7d4f3b9a7081,42679,1733173709184 2024-12-02T21:09:16,320 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42679-0x1019929a40e0003, quorum=127.0.0.1:58708, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:09:16,320 INFO [RS:0;7d4f3b9a7081:46239 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T21:09:16,320 DEBUG [RS:1;7d4f3b9a7081:42679 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:09:16,320 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46239-0x1019929a40e0001, quorum=127.0.0.1:58708, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:09:16,320 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-02T21:09:16,320 INFO [RS:0;7d4f3b9a7081:46239 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T21:09:16,320 INFO [RS:1;7d4f3b9a7081:42679 {}] regionserver.HRegionServer(1250): stopping server 7d4f3b9a7081,42679,1733173709184; all regions closed. 2024-12-02T21:09:16,320 INFO [RS:0;7d4f3b9a7081:46239 {}] regionserver.HRegionServer(3579): Received CLOSE for 9eb28e6fd3caef0514628f72efd96120 2024-12-02T21:09:16,321 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-02T21:09:16,321 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,42679,1733173709184 2024-12-02T21:09:16,321 WARN [WAL-Shutdown-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:09:16,321 INFO [RS:0;7d4f3b9a7081:46239 {}] regionserver.HRegionServer(3579): Received CLOSE for c3ed35c3efec05998439d93744730396 2024-12-02T21:09:16,322 INFO [RS:0;7d4f3b9a7081:46239 {}] regionserver.HRegionServer(1224): stopping server 7d4f3b9a7081,46239,1733173707323 2024-12-02T21:09:16,322 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 9eb28e6fd3caef0514628f72efd96120, disabling compactions & flushes 2024-12-02T21:09:16,322 DEBUG [RS:0;7d4f3b9a7081:46239 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:09:16,322 ERROR [RS:1;7d4f3b9a7081:42679 {}] regionserver.HRegionServer(1664): Shutdown / close of WAL failed: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK]] are bad. Aborting... 2024-12-02T21:09:16,322 INFO [RS:0;7d4f3b9a7081:46239 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T21:09:16,322 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733173709298.9eb28e6fd3caef0514628f72efd96120. 2024-12-02T21:09:16,322 INFO [RS:0;7d4f3b9a7081:46239 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T21:09:16,322 INFO [RS:0;7d4f3b9a7081:46239 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-02T21:09:16,322 DEBUG [RS:1;7d4f3b9a7081:42679 {}] regionserver.HRegionServer(1665): Shutdown / close exception details: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:09:16,322 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733173709298.9eb28e6fd3caef0514628f72efd96120. 2024-12-02T21:09:16,322 DEBUG [RS:1;7d4f3b9a7081:42679 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:09:16,322 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733173709298.9eb28e6fd3caef0514628f72efd96120. after waiting 0 ms 2024-12-02T21:09:16,322 INFO [RS:0;7d4f3b9a7081:46239 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-02T21:09:16,322 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733173709298.9eb28e6fd3caef0514628f72efd96120. 
2024-12-02T21:09:16,322 INFO [RS:1;7d4f3b9a7081:42679 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T21:09:16,322 INFO [RS:0;7d4f3b9a7081:46239 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close 2024-12-02T21:09:16,322 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 9eb28e6fd3caef0514628f72efd96120 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-12-02T21:09:16,322 DEBUG [RS:0;7d4f3b9a7081:46239 {}] regionserver.HRegionServer(1603): Online Regions={9eb28e6fd3caef0514628f72efd96120=TestLogRolling-testLogRollOnDatanodeDeath,,1733173709298.9eb28e6fd3caef0514628f72efd96120., 1588230740=hbase:meta,,1.1588230740, c3ed35c3efec05998439d93744730396=hbase:namespace,,1733173708405.c3ed35c3efec05998439d93744730396.} 2024-12-02T21:09:16,322 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-02T21:09:16,322 INFO [RS:1;7d4f3b9a7081:42679 {}] hbase.ChoreService(370): Chore service for: regionserver/7d4f3b9a7081:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-02T21:09:16,322 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-02T21:09:16,322 DEBUG [RS:0;7d4f3b9a7081:46239 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 9eb28e6fd3caef0514628f72efd96120, c3ed35c3efec05998439d93744730396 2024-12-02T21:09:16,322 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-02T21:09:16,322 INFO [RS:1;7d4f3b9a7081:42679 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T21:09:16,322 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T21:09:16,322 INFO [RS:1;7d4f3b9a7081:42679 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T21:09:16,322 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-02T21:09:16,323 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T21:09:16,323 INFO [RS:1;7d4f3b9a7081:42679 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-02T21:09:16,323 INFO [RS:1;7d4f3b9a7081:42679 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:42679 2024-12-02T21:09:16,323 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.87 KB heapSize=5.40 KB 2024-12-02T21:09:16,323 WARN [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=15, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:09:16,323 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 7d4f3b9a7081%2C46239%2C1733173707323.meta:.meta(num 1733173708307) roll requested 2024-12-02T21:09:16,324 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-02T21:09:16,324 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173756324.meta 2024-12-02T21:09:16,324 ERROR [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2808): ***** ABORTING region server 7d4f3b9a7081,46239,1733173707323: Unrecoverable exception while closing hbase:meta,,1.1588230740 ***** org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:09:16,324 ERROR [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2815): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-12-02T21:09:16,327 WARN [Thread-809 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:09:16,327 WARN [Thread-809 {}] hdfs.DataStreamer(1731): Error Recovery for BP-967350915-172.17.0.2-1733173705684:blk_1073741865_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34327,DS-3e1de32e-0f5b-40e5-9985-8a8ac62fe152,DISK], DatanodeInfoWithStorage[127.0.0.1:37629,DS-6035e3bc-16e6-4f1e-bdd3-3bc5051602af,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34327,DS-3e1de32e-0f5b-40e5-9985-8a8ac62fe152,DISK]) is bad. 2024-12-02T21:09:16,327 WARN [Thread-809 {}] hdfs.DataStreamer(1850): Abandoning BP-967350915-172.17.0.2-1733173705684:blk_1073741865_1049 2024-12-02T21:09:16,327 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-12-02T21:09:16,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T21:09:16,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42679-0x1019929a40e0003, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7d4f3b9a7081,42679,1733173709184 2024-12-02T21:09:16,327 WARN [Thread-809 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34327,DS-3e1de32e-0f5b-40e5-9985-8a8ac62fe152,DISK] 2024-12-02T21:09:16,328 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9eb28e6fd3caef0514628f72efd96120/.tmp/info/ed67bae6bfee44aeb12e7f7b85659e6e is 1080, key is row0014/info:/1733173756286/Put/seqid=0 2024-12-02T21:09:16,329 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-12-02T21:09:16,329 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-12-02T21:09:16,329 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-12-02T21:09:16,329 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7d4f3b9a7081,42679,1733173709184] 2024-12-02T21:09:16,329 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 7d4f3b9a7081,42679,1733173709184; numProcessing=1 2024-12-02T21:09:16,329 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2819): Dump of metrics as JSON on abort: { "beans": [ { "name": 
"java.lang:type=Memory", "modelerType": "sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1048576000, "init": 1048576000, "max": 2306867200, "used": 267149712 }, "NonHeapMemoryUsage": { "committed": 161677312, "init": 7667712, "max": -1, "used": 159910768 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [], "beans": [], "beans": [] } 2024-12-02T21:09:16,329 WARN [Thread-809 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:09:16,329 WARN [Thread-809 {}] hdfs.DataStreamer(1731): Error Recovery for BP-967350915-172.17.0.2-1733173705684:blk_1073741866_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41471,DS-dd27ee88-6fd5-46c8-aaad-4c235dc87934,DISK], DatanodeInfoWithStorage[127.0.0.1:45501,DS-7999ded0-6f63-4ce0-8abb-657329e12700,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41471,DS-dd27ee88-6fd5-46c8-aaad-4c235dc87934,DISK]) is bad. 2024-12-02T21:09:16,329 WARN [Thread-809 {}] hdfs.DataStreamer(1850): Abandoning BP-967350915-172.17.0.2-1733173705684:blk_1073741866_1050 2024-12-02T21:09:16,330 WARN [Thread-809 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41471,DS-dd27ee88-6fd5-46c8-aaad-4c235dc87934,DISK] 2024-12-02T21:09:16,330 WARN [Thread-810 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:09:16,330 WARN [Thread-810 {}] hdfs.DataStreamer(1731): Error Recovery for BP-967350915-172.17.0.2-1733173705684:blk_1073741867_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41471,DS-dd27ee88-6fd5-46c8-aaad-4c235dc87934,DISK], DatanodeInfoWithStorage[127.0.0.1:45501,DS-7999ded0-6f63-4ce0-8abb-657329e12700,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41471,DS-dd27ee88-6fd5-46c8-aaad-4c235dc87934,DISK]) is bad. 2024-12-02T21:09:16,330 WARN [Thread-810 {}] hdfs.DataStreamer(1850): Abandoning BP-967350915-172.17.0.2-1733173705684:blk_1073741867_1051 2024-12-02T21:09:16,330 WARN [Thread-810 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41471,DS-dd27ee88-6fd5-46c8-aaad-4c235dc87934,DISK] 2024-12-02T21:09:16,332 WARN [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36403 {}] master.MasterRpcServices(626): 7d4f3b9a7081,46239,1733173707323 reported a fatal error: ***** ABORTING region server 7d4f3b9a7081,46239,1733173707323: Unrecoverable exception while closing hbase:meta,,1.1588230740 ***** Cause: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) at java.base/java.lang.Thread.run(Thread.java:840) Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) 2024-12-02T21:09:16,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45501 is added to blk_1073741869_1053 (size=14663) 2024-12-02T21:09:16,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741869_1053 (size=14663) 2024-12-02T21:09:16,341 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=36 (bloomFilter=true), to=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9eb28e6fd3caef0514628f72efd96120/.tmp/info/ed67bae6bfee44aeb12e7f7b85659e6e 2024-12-02T21:09:16,343 WARN [regionserver/7d4f3b9a7081:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL 2024-12-02T21:09:16,343 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta with entries=11, filesize=3.63 KB; new WAL /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173756324.meta 2024-12-02T21:09:16,344 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39357:39357),(127.0.0.1/127.0.0.1:35585:35585)] 2024-12-02T21:09:16,344 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta is not closed yet, will try archiving it next time 2024-12-02T21:09:16,344 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:09:16,344 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/7d4f3b9a7081,42679,1733173709184 already deleted, retry=false 2024-12-02T21:09:16,344 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42239,DS-86240d69-8ed7-4081-b6bc-0889c709e6c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:09:16,344 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 7d4f3b9a7081,42679,1733173709184 expired; onlineServers=1 2024-12-02T21:09:16,344 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta 2024-12-02T21:09:16,345 WARN [IPC Server handler 3 on default port 40413 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1054 for block blk_1073741834_1010 2024-12-02T21:09:16,345 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta after 1ms 2024-12-02T21:09:16,347 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9eb28e6fd3caef0514628f72efd96120/.tmp/info/ed67bae6bfee44aeb12e7f7b85659e6e as hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9eb28e6fd3caef0514628f72efd96120/info/ed67bae6bfee44aeb12e7f7b85659e6e 2024-12-02T21:09:16,353 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9eb28e6fd3caef0514628f72efd96120/info/ed67bae6bfee44aeb12e7f7b85659e6e, entries=9, sequenceid=36, filesize=14.3 K 2024-12-02T21:09:16,354 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 9eb28e6fd3caef0514628f72efd96120 in 32ms, sequenceid=36, compaction requested=true 2024-12-02T21:09:16,357 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9eb28e6fd3caef0514628f72efd96120/recovered.edits/39.seqid, newMaxSeqId=39, maxSeqId=1 2024-12-02T21:09:16,358 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733173709298.9eb28e6fd3caef0514628f72efd96120. 2024-12-02T21:09:16,358 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 9eb28e6fd3caef0514628f72efd96120: 2024-12-02T21:09:16,358 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733173709298.9eb28e6fd3caef0514628f72efd96120. 2024-12-02T21:09:16,358 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing c3ed35c3efec05998439d93744730396, disabling compactions & flushes 2024-12-02T21:09:16,359 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733173708405.c3ed35c3efec05998439d93744730396. 2024-12-02T21:09:16,359 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733173708405.c3ed35c3efec05998439d93744730396. 
2024-12-02T21:09:16,359 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733173708405.c3ed35c3efec05998439d93744730396. after waiting 0 ms 2024-12-02T21:09:16,359 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733173708405.c3ed35c3efec05998439d93744730396. 2024-12-02T21:09:16,359 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for c3ed35c3efec05998439d93744730396: 2024-12-02T21:09:16,359 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing hbase:namespace,,1733173708405.c3ed35c3efec05998439d93744730396. 2024-12-02T21:09:16,436 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42679-0x1019929a40e0003, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:09:16,436 INFO [RS:1;7d4f3b9a7081:42679 {}] regionserver.HRegionServer(1307): Exiting; stopping=7d4f3b9a7081,42679,1733173709184; zookeeper connection closed. 2024-12-02T21:09:16,436 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42679-0x1019929a40e0003, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:09:16,436 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1ca4c705 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1ca4c705 2024-12-02T21:09:16,523 INFO [RS:0;7d4f3b9a7081:46239 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-02T21:09:16,523 INFO [RS:0;7d4f3b9a7081:46239 {}] regionserver.HRegionServer(3579): Received CLOSE for c3ed35c3efec05998439d93744730396 2024-12-02T21:09:16,523 DEBUG [RS:0;7d4f3b9a7081:46239 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, c3ed35c3efec05998439d93744730396 2024-12-02T21:09:16,523 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-02T21:09:16,523 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing c3ed35c3efec05998439d93744730396, disabling compactions & flushes 2024-12-02T21:09:16,523 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-02T21:09:16,523 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733173708405.c3ed35c3efec05998439d93744730396. 2024-12-02T21:09:16,523 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733173708405.c3ed35c3efec05998439d93744730396. 
2024-12-02T21:09:16,523 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-02T21:09:16,523 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T21:09:16,523 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733173708405.c3ed35c3efec05998439d93744730396. after waiting 0 ms 2024-12-02T21:09:16,523 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733173708405.c3ed35c3efec05998439d93744730396. 2024-12-02T21:09:16,523 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T21:09:16,523 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for c3ed35c3efec05998439d93744730396: 2024-12-02T21:09:16,523 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-02T21:09:16,523 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing hbase:meta,,1.1588230740 2024-12-02T21:09:16,523 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing hbase:namespace,,1733173708405.c3ed35c3efec05998439d93744730396. 2024-12-02T21:09:16,687 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173732646 to hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/oldWALs/7d4f3b9a7081%2C46239%2C1733173707323.1733173732646 2024-12-02T21:09:16,692 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173736748 to hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/oldWALs/7d4f3b9a7081%2C46239%2C1733173707323.1733173736748 2024-12-02T21:09:16,695 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.1733173736769 to hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/oldWALs/7d4f3b9a7081%2C46239%2C1733173707323.1733173736769 2024-12-02T21:09:16,723 INFO [RS:0;7d4f3b9a7081:46239 {}] regionserver.HRegionServer(1624): We were exiting though online regions are not empty, because some regions failed closing 2024-12-02T21:09:16,723 INFO [RS:0;7d4f3b9a7081:46239 {}] regionserver.HRegionServer(1250): stopping server 7d4f3b9a7081,46239,1733173707323; all regions closed. 
2024-12-02T21:09:16,724 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323 2024-12-02T21:09:16,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741868_1052 (size=93) 2024-12-02T21:09:16,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45501 is added to blk_1073741868_1052 (size=93) 2024-12-02T21:09:16,749 INFO [regionserver/7d4f3b9a7081:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-02T21:09:16,749 INFO [regionserver/7d4f3b9a7081:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-02T21:09:17,272 INFO [regionserver/7d4f3b9a7081:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T21:09:17,439 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3556b774[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37629, datanodeUuid=6813777f-477b-4558-9cef-15f971230ace, infoPort=35585, infoSecurePort=0, ipcPort=38151, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684):Failed to transfer BP-967350915-172.17.0.2-1733173705684:blk_1073741857_1040 to 127.0.0.1:34327 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:09:17,640 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@65249791 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-967350915-172.17.0.2-1733173705684:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:42239,null,null]) java.net.ConnectException: Call From 7d4f3b9a7081/172.17.0.2 to localhost:39531 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-02T21:09:17,710 INFO [regionserver/7d4f3b9a7081:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T21:09:20,346 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta after 4002ms 2024-12-02T21:09:20,618 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1d10fd5f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45501, datanodeUuid=e2a33afa-21be-4db5-98c1-a8e5ef378ea1, infoPort=39357, infoSecurePort=0, ipcPort=36689, storageInfo=lv=-57;cid=testClusterID;nsid=1125043526;c=1733173705684):Failed to transfer BP-967350915-172.17.0.2-1733173705684:blk_1073741830_1045 to 127.0.0.1:41471 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:09:21,359 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:09:21,368 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:09:21,371 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:09:21,371 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:09:21,376 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:09:21,376 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:09:21,730 ERROR [WAL-Shutdown-0 {}] wal.FSHLog(508): We have waited 5 seconds but the close of writer(s) doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-02T21:09:21,731 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323 2024-12-02T21:09:21,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741863_1047 (size=13514) 2024-12-02T21:09:21,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45501 is added to blk_1073741863_1047 (size=13514) 2024-12-02T21:09:21,735 DEBUG [RS:0;7d4f3b9a7081:46239 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:09:21,736 INFO [RS:0;7d4f3b9a7081:46239 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T21:09:21,736 INFO [RS:0;7d4f3b9a7081:46239 {}] hbase.ChoreService(370): Chore service for: regionserver/7d4f3b9a7081:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-02T21:09:21,736 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
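
The WAL-Shutdown-0 error above reports that the writer close did not finish within the default 5-second wait and points at the "hbase.wal.fshlog.wait.on.shutdown.seconds" setting. A minimal sketch, assuming a stock HBase Configuration, of how that wait could be raised; the value 30 is an arbitrary illustration, not taken from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalShutdownWaitExample {
  public static void main(String[] args) {
    // Start from a stock HBase configuration.
    Configuration conf = HBaseConfiguration.create();

    // The FSHLog(508) message above suggests increasing this wait when the
    // underlying filesystem is slow to close WAL writers on shutdown.
    // 30 seconds is an illustrative value, not one used by this test.
    conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);

    System.out.println("wait.on.shutdown.seconds = "
        + conf.getInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 5));
  }
}
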
2024-12-02T21:09:21,737 INFO [RS:0;7d4f3b9a7081:46239 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:46239 2024-12-02T21:09:21,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x1019929a40e0001, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7d4f3b9a7081,46239,1733173707323 2024-12-02T21:09:21,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T21:09:21,823 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7d4f3b9a7081,46239,1733173707323] 2024-12-02T21:09:21,823 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 7d4f3b9a7081,46239,1733173707323; numProcessing=2 2024-12-02T21:09:21,865 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/7d4f3b9a7081,46239,1733173707323 already deleted, retry=false 2024-12-02T21:09:21,865 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 7d4f3b9a7081,46239,1733173707323 expired; onlineServers=0 2024-12-02T21:09:21,865 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7d4f3b9a7081,36403,1733173707177' ***** 2024-12-02T21:09:21,865 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-02T21:09:21,866 DEBUG [M:0;7d4f3b9a7081:36403 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cc143f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7d4f3b9a7081/172.17.0.2:0 2024-12-02T21:09:21,866 INFO [M:0;7d4f3b9a7081:36403 {}] regionserver.HRegionServer(1224): stopping server 7d4f3b9a7081,36403,1733173707177 2024-12-02T21:09:21,866 INFO [M:0;7d4f3b9a7081:36403 {}] regionserver.HRegionServer(1250): stopping server 7d4f3b9a7081,36403,1733173707177; all regions closed. 2024-12-02T21:09:21,866 DEBUG [M:0;7d4f3b9a7081:36403 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:09:21,866 DEBUG [M:0;7d4f3b9a7081:36403 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-02T21:09:21,867 DEBUG [M:0;7d4f3b9a7081:36403 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-02T21:09:21,867 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-02T21:09:21,867 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.large.0-1733173707621 {}] cleaner.HFileCleaner(306): Exit Thread[master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.large.0-1733173707621,5,FailOnTimeoutGroup] 2024-12-02T21:09:21,867 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.small.0-1733173707621 {}] cleaner.HFileCleaner(306): Exit Thread[master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.small.0-1733173707621,5,FailOnTimeoutGroup] 2024-12-02T21:09:21,867 INFO [M:0;7d4f3b9a7081:36403 {}] hbase.ChoreService(370): Chore service for: master/7d4f3b9a7081:0 had [] on shutdown 2024-12-02T21:09:21,868 DEBUG [M:0;7d4f3b9a7081:36403 {}] master.HMaster(1733): Stopping service threads 2024-12-02T21:09:21,868 INFO [M:0;7d4f3b9a7081:36403 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-02T21:09:21,869 INFO [M:0;7d4f3b9a7081:36403 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-02T21:09:21,869 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-02T21:09:21,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-02T21:09:21,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:09:21,878 DEBUG [M:0;7d4f3b9a7081:36403 {}] zookeeper.ZKUtil(347): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-02T21:09:21,878 WARN [M:0;7d4f3b9a7081:36403 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-02T21:09:21,878 INFO [M:0;7d4f3b9a7081:36403 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-02T21:09:21,878 INFO [M:0;7d4f3b9a7081:36403 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-02T21:09:21,878 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-02T21:09:21,878 DEBUG [M:0;7d4f3b9a7081:36403 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T21:09:21,878 INFO [M:0;7d4f3b9a7081:36403 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:09:21,878 DEBUG [M:0;7d4f3b9a7081:36403 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:09:21,878 DEBUG [M:0;7d4f3b9a7081:36403 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T21:09:21,878 DEBUG [M:0;7d4f3b9a7081:36403 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-02T21:09:21,878 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T21:09:21,879 INFO [M:0;7d4f3b9a7081:36403 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=40.08 KB heapSize=49.29 KB 2024-12-02T21:09:21,880 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:09:21,894 DEBUG [M:0;7d4f3b9a7081:36403 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fbf61fb5b7f7455492afc3ccceca1767 is 82, key is hbase:meta,,1/info:regioninfo/1733173708333/Put/seqid=0 2024-12-02T21:09:21,898 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:09:21,899 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:09:21,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45501 is added to blk_1073741870_1055 (size=5672) 2024-12-02T21:09:21,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741870_1055 (size=5672) 2024-12-02T21:09:21,902 INFO [M:0;7d4f3b9a7081:36403 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fbf61fb5b7f7455492afc3ccceca1767 2024-12-02T21:09:21,902 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:09:21,902 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:09:21,903 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:09:21,920 DEBUG [M:0;7d4f3b9a7081:36403 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/783c9398915844d7b52363ad2c2c927a is 774, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1733173709743/Put/seqid=0 2024-12-02T21:09:21,923 INFO [RS:0;7d4f3b9a7081:46239 {}] regionserver.HRegionServer(1307): Exiting; stopping=7d4f3b9a7081,46239,1733173707323; zookeeper connection closed. 
2024-12-02T21:09:21,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x1019929a40e0001, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:09:21,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x1019929a40e0001, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:09:21,923 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@16052f73 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@16052f73 2024-12-02T21:09:21,924 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-12-02T21:09:21,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45501 is added to blk_1073741871_1056 (size=7465) 2024-12-02T21:09:21,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741871_1056 (size=7465) 2024-12-02T21:09:21,925 INFO [M:0;7d4f3b9a7081:36403 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=39.41 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/783c9398915844d7b52363ad2c2c927a 2024-12-02T21:09:21,943 DEBUG [M:0;7d4f3b9a7081:36403 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/594504657f254c2aaaec9e5202ddd264 is 69, key is 7d4f3b9a7081,42679,1733173709184/rs:state/1733173709244/Put/seqid=0 2024-12-02T21:09:21,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45501 is added to blk_1073741872_1057 (size=5224) 2024-12-02T21:09:21,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741872_1057 (size=5224) 2024-12-02T21:09:21,953 INFO [M:0;7d4f3b9a7081:36403 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/594504657f254c2aaaec9e5202ddd264 2024-12-02T21:09:21,971 DEBUG [M:0;7d4f3b9a7081:36403 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/def37630f73543c797f59ddd18c7610d is 52, key is load_balancer_on/state:d/1733173709168/Put/seqid=0 2024-12-02T21:09:21,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741873_1058 (size=5056) 2024-12-02T21:09:21,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45501 is added to blk_1073741873_1058 (size=5056) 2024-12-02T21:09:21,976 INFO [M:0;7d4f3b9a7081:36403 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=97 (bloomFilter=true), 
to=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/def37630f73543c797f59ddd18c7610d 2024-12-02T21:09:21,981 DEBUG [M:0;7d4f3b9a7081:36403 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fbf61fb5b7f7455492afc3ccceca1767 as hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fbf61fb5b7f7455492afc3ccceca1767 2024-12-02T21:09:21,986 INFO [M:0;7d4f3b9a7081:36403 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fbf61fb5b7f7455492afc3ccceca1767, entries=8, sequenceid=97, filesize=5.5 K 2024-12-02T21:09:21,988 DEBUG [M:0;7d4f3b9a7081:36403 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/783c9398915844d7b52363ad2c2c927a as hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/783c9398915844d7b52363ad2c2c927a 2024-12-02T21:09:21,993 INFO [M:0;7d4f3b9a7081:36403 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/783c9398915844d7b52363ad2c2c927a, entries=11, sequenceid=97, filesize=7.3 K 2024-12-02T21:09:21,994 DEBUG [M:0;7d4f3b9a7081:36403 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/594504657f254c2aaaec9e5202ddd264 as hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/594504657f254c2aaaec9e5202ddd264 2024-12-02T21:09:21,999 INFO [M:0;7d4f3b9a7081:36403 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/594504657f254c2aaaec9e5202ddd264, entries=2, sequenceid=97, filesize=5.1 K 2024-12-02T21:09:22,000 DEBUG [M:0;7d4f3b9a7081:36403 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/def37630f73543c797f59ddd18c7610d as hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/def37630f73543c797f59ddd18c7610d 2024-12-02T21:09:22,005 INFO [M:0;7d4f3b9a7081:36403 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/def37630f73543c797f59ddd18c7610d, entries=1, sequenceid=97, filesize=4.9 K 2024-12-02T21:09:22,006 INFO [M:0;7d4f3b9a7081:36403 {}] regionserver.HRegion(3040): Finished flush of dataSize 
~40.08 KB/41039, heapSize ~49.23 KB/50408, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=97, compaction requested=false 2024-12-02T21:09:22,007 INFO [M:0;7d4f3b9a7081:36403 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:09:22,007 DEBUG [M:0;7d4f3b9a7081:36403 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-02T21:09:22,008 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/MasterData/WALs/7d4f3b9a7081,36403,1733173707177 2024-12-02T21:09:22,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741861_1044 (size=757) 2024-12-02T21:09:22,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45501 is added to blk_1073741861_1044 (size=757) 2024-12-02T21:09:22,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:22,411 INFO [M:0;7d4f3b9a7081:36403 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-02T21:09:22,411 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
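
The Close-WAL-Writer-0 entries above show RecoverLeaseFSUtils retrying lease recovery on the meta WAL and then failing its reflective isFileClosed probe because the DFSClient had already been closed. A minimal sketch of the underlying HDFS calls that loop relies on (recoverLease followed by polling isFileClosed); the path and timeout are illustrative placeholders, not values from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  // Ask the NameNode to recover the lease on a file left open by a dead
  // writer, then poll until HDFS reports the file as closed.
  static boolean recover(DistributedFileSystem dfs, Path path) throws Exception {
    boolean recovered = dfs.recoverLease(path);
    long deadline = System.currentTimeMillis() + 60_000; // illustrative timeout
    while (!recovered && System.currentTimeMillis() < deadline) {
      Thread.sleep(1_000);
      // isFileClosed is the same probe RecoverLeaseFSUtils invokes reflectively
      // in the stack trace above; it fails once the DFSClient is closed.
      recovered = dfs.isFileClosed(path);
    }
    return recovered;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder path; the real WAL path appears in the log entries above.
    Path wal = new Path("hdfs://localhost:40413/example/wal-file");
    try (DistributedFileSystem dfs =
             (DistributedFileSystem) wal.getFileSystem(conf)) {
      System.out.println("closed: " + recover(dfs, wal));
    }
  }
}
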
2024-12-02T21:09:22,411 INFO [M:0;7d4f3b9a7081:36403 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:36403 2024-12-02T21:09:22,465 DEBUG [M:0;7d4f3b9a7081:36403 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/7d4f3b9a7081,36403,1733173707177 already deleted, retry=false 2024-12-02T21:09:22,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:09:22,578 INFO [M:0;7d4f3b9a7081:36403 {}] regionserver.HRegionServer(1307): Exiting; stopping=7d4f3b9a7081,36403,1733173707177; zookeeper connection closed. 2024-12-02T21:09:22,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36403-0x1019929a40e0000, quorum=127.0.0.1:58708, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:09:22,580 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7708a86b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:09:22,580 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6fc6c011{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:09:22,580 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:09:22,581 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4d900df1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:09:22,581 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b3886a5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/hadoop.log.dir/,STOPPED} 2024-12-02T21:09:22,582 WARN [BP-967350915-172.17.0.2-1733173705684 heartbeating to localhost/127.0.0.1:40413 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:09:22,582 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@74e0458e {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-967350915-172.17.0.2-1733173705684:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:42239,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:39531 , LocalHost:localPort 7d4f3b9a7081/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-12-02T21:09:22,582 WARN [BP-967350915-172.17.0.2-1733173705684 heartbeating to localhost/127.0.0.1:40413 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-967350915-172.17.0.2-1733173705684 (Datanode Uuid e2a33afa-21be-4db5-98c1-a8e5ef378ea1) service to localhost/127.0.0.1:40413 2024-12-02T21:09:22,582 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-02T21:09:22,582 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:09:22,583 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data3/current/BP-967350915-172.17.0.2-1733173705684 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:09:22,583 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data4/current/BP-967350915-172.17.0.2-1733173705684 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:09:22,583 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:09:22,583 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@74e0458e {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(293): Failed to updateBlock (newblock=BP-967350915-172.17.0.2-1733173705684:blk_1073741834_1054, datanode=DatanodeInfoWithStorage[127.0.0.1:45501,null,null]) org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException: Replica not found for BP-967350915-172.17.0.2-1733173705684:blk_1073741834_1010[numBytes=3714,originalReplicaState=RWR]. The block may have been removed recently by the balancer or by intentionally reducing the replication factor. This condition is usually harmless. To be certain, please check the preceding datanode log messages for signs of a more serious issue. at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.updateReplicaUnderRecovery(FsDatasetImpl.java:3104) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode.updateReplicaUnderRecovery(DataNode.java:3537) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$BlockRecord.updateReplicaUnderRecovery(BlockRecoveryWorker.java:88) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$BlockRecord.access$700(BlockRecoveryWorker.java:71) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.syncBlock(BlockRecoveryWorker.java:289) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:183) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T21:09:22,583 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@74e0458e {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-967350915-172.17.0.2-1733173705684:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:45501,null,null], DatanodeInfoWithStorage[127.0.0.1:42239,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: Cannot recover BP-967350915-172.17.0.2-1733173705684:blk_1073741834_1010, the following datanodes failed: [DatanodeInfoWithStorage[127.0.0.1:45501,null,null]] 2024-12-02T21:09:22,585 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@15a5ecfd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:09:22,586 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3c8e07c0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:09:22,586 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:09:22,586 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f3582bb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:09:22,586 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11d5ee62{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/hadoop.log.dir/,STOPPED} 2024-12-02T21:09:22,587 WARN [BP-967350915-172.17.0.2-1733173705684 heartbeating to localhost/127.0.0.1:40413 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:09:22,587 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T21:09:22,587 WARN [BP-967350915-172.17.0.2-1733173705684 heartbeating to localhost/127.0.0.1:40413 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-967350915-172.17.0.2-1733173705684 (Datanode Uuid 6813777f-477b-4558-9cef-15f971230ace) service to localhost/127.0.0.1:40413 2024-12-02T21:09:22,587 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:09:22,588 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data9/current/BP-967350915-172.17.0.2-1733173705684 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:09:22,588 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/cluster_3abf8b3b-f6f4-828c-a519-1f257913387f/dfs/data/data10/current/BP-967350915-172.17.0.2-1733173705684 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:09:22,588 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:09:22,595 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@643c44f2{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T21:09:22,595 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@72ab5857{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:09:22,595 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:09:22,595 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1d621144{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:09:22,595 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@58ba2427{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/hadoop.log.dir/,STOPPED} 2024-12-02T21:09:22,602 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-02T21:09:22,628 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-02T21:09:22,634 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=88 (was 65) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40413 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (356647748) connection to localhost/127.0.0.1:40413 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40413 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RS-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40413 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40413 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:40413 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (356647748) connection to localhost/127.0.0.1:40413 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: 
LeaseRenewer:jenkins.hfs.2@localhost:40413 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (356647748) connection to localhost/127.0.0.1:40413 from jenkins.hfs.1 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-16-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$$Lambda$797/0x00007fde04b83870.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Abort regionserver monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RS-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=424 (was 406) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=49 (was 63), ProcessCount=11 (was 11), AvailableMemoryMB=6919 (was 7656) 2024-12-02T21:09:22,640 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=88, OpenFileDescriptor=424, MaxFileDescriptor=1048576, SystemLoadAverage=49, ProcessCount=11, AvailableMemoryMB=6919 2024-12-02T21:09:22,640 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-02T21:09:22,640 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/hadoop.log.dir so I do NOT create it in target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1 2024-12-02T21:09:22,640 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0a82523b-981c-e30f-8f22-ae7c797f906e/hadoop.tmp.dir so I do NOT create it in target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1 2024-12-02T21:09:22,640 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/cluster_a36dc0da-24c9-4660-2142-0a0442b4566b, deleteOnExit=true 2024-12-02T21:09:22,640 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): 
STARTING DFS 2024-12-02T21:09:22,641 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/test.cache.data in system properties and HBase conf 2024-12-02T21:09:22,641 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/hadoop.tmp.dir in system properties and HBase conf 2024-12-02T21:09:22,641 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/hadoop.log.dir in system properties and HBase conf 2024-12-02T21:09:22,641 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-02T21:09:22,641 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-02T21:09:22,641 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-02T21:09:22,641 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-02T21:09:22,642 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-02T21:09:22,642 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-02T21:09:22,642 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-02T21:09:22,642 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T21:09:22,642 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-02T21:09:22,642 INFO [Time-limited test {}] 
hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-02T21:09:22,642 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T21:09:22,642 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T21:09:22,642 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-02T21:09:22,642 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/nfs.dump.dir in system properties and HBase conf 2024-12-02T21:09:22,642 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/java.io.tmpdir in system properties and HBase conf 2024-12-02T21:09:22,642 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T21:09:22,642 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-02T21:09:22,642 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-02T21:09:22,655 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T21:09:22,905 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:09:22,909 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:09:22,910 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:09:22,910 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:09:22,910 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T21:09:22,911 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:09:22,911 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@aa7e0a3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:09:22,912 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@721f1cbb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:09:23,000 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@29709a78{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/java.io.tmpdir/jetty-localhost-36643-hadoop-hdfs-3_4_1-tests_jar-_-any-3498898185869379966/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T21:09:23,000 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@26116fb5{HTTP/1.1, (http/1.1)}{localhost:36643} 2024-12-02T21:09:23,000 INFO [Time-limited test {}] server.Server(415): Started @174380ms 2024-12-02T21:09:23,011 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T21:09:23,193 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:09:23,196 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:09:23,197 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:09:23,197 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:09:23,197 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T21:09:23,197 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d7ae91e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:09:23,198 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e9bbc9d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:09:23,285 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7516fc94{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/java.io.tmpdir/jetty-localhost-46663-hadoop-hdfs-3_4_1-tests_jar-_-any-12832307523557940101/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:09:23,286 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1df7a86c{HTTP/1.1, (http/1.1)}{localhost:46663} 2024-12-02T21:09:23,286 INFO [Time-limited test {}] server.Server(415): Started @174666ms 2024-12-02T21:09:23,287 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:09:23,311 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:09:23,316 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:09:23,316 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:09:23,317 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:09:23,317 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T21:09:23,317 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@26fd3d4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:09:23,317 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@580d9f1c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:09:23,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:09:23,409 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5c0f1ef4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/java.io.tmpdir/jetty-localhost-43797-hadoop-hdfs-3_4_1-tests_jar-_-any-9453140731492766427/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:09:23,409 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3a1e6fd8{HTTP/1.1, (http/1.1)}{localhost:43797} 2024-12-02T21:09:23,409 INFO [Time-limited test {}] server.Server(415): Started @174789ms 2024-12-02T21:09:23,410 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:09:23,833 WARN [Thread-940 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/cluster_a36dc0da-24c9-4660-2142-0a0442b4566b/dfs/data/data1/current/BP-1649102533-172.17.0.2-1733173762666/current, will proceed with Du for space computation calculation, 2024-12-02T21:09:23,833 WARN [Thread-941 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/cluster_a36dc0da-24c9-4660-2142-0a0442b4566b/dfs/data/data2/current/BP-1649102533-172.17.0.2-1733173762666/current, will proceed with Du for space computation calculation, 2024-12-02T21:09:23,852 WARN [Thread-904 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T21:09:23,854 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5bb85c253459d300 with lease ID 0x7c8b4e6cc8a01643: Processing first storage report for DS-020c589a-a637-4a0b-91c5-4976ea7c1c25 from datanode DatanodeRegistration(127.0.0.1:44711, datanodeUuid=6d3579fa-125d-47cd-85cc-8fd66e40db23, infoPort=40199, infoSecurePort=0, ipcPort=32979, storageInfo=lv=-57;cid=testClusterID;nsid=630606353;c=1733173762666) 2024-12-02T21:09:23,854 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5bb85c253459d300 with lease ID 0x7c8b4e6cc8a01643: from storage DS-020c589a-a637-4a0b-91c5-4976ea7c1c25 node DatanodeRegistration(127.0.0.1:44711, datanodeUuid=6d3579fa-125d-47cd-85cc-8fd66e40db23, infoPort=40199, infoSecurePort=0, ipcPort=32979, storageInfo=lv=-57;cid=testClusterID;nsid=630606353;c=1733173762666), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:09:23,854 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5bb85c253459d300 with lease ID 0x7c8b4e6cc8a01643: Processing first storage report for DS-c87b65bb-8a6c-4814-9307-d7ebddd821fe from datanode DatanodeRegistration(127.0.0.1:44711, datanodeUuid=6d3579fa-125d-47cd-85cc-8fd66e40db23, infoPort=40199, infoSecurePort=0, ipcPort=32979, storageInfo=lv=-57;cid=testClusterID;nsid=630606353;c=1733173762666) 2024-12-02T21:09:23,854 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5bb85c253459d300 with lease ID 0x7c8b4e6cc8a01643: from storage DS-c87b65bb-8a6c-4814-9307-d7ebddd821fe node DatanodeRegistration(127.0.0.1:44711, datanodeUuid=6d3579fa-125d-47cd-85cc-8fd66e40db23, infoPort=40199, infoSecurePort=0, ipcPort=32979, storageInfo=lv=-57;cid=testClusterID;nsid=630606353;c=1733173762666), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:09:23,950 WARN [Thread-951 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/cluster_a36dc0da-24c9-4660-2142-0a0442b4566b/dfs/data/data3/current/BP-1649102533-172.17.0.2-1733173762666/current, will proceed with Du for space computation calculation, 2024-12-02T21:09:23,950 WARN [Thread-952 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/cluster_a36dc0da-24c9-4660-2142-0a0442b4566b/dfs/data/data4/current/BP-1649102533-172.17.0.2-1733173762666/current, will proceed with Du for space computation calculation, 2024-12-02T21:09:23,968 WARN [Thread-927 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T21:09:23,970 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa17459c78bd9b6e3 with lease ID 0x7c8b4e6cc8a01644: Processing first storage report for DS-a09a5dec-f5bf-45ab-bc95-4ef2c766b66a from datanode DatanodeRegistration(127.0.0.1:46749, datanodeUuid=71ceb9c5-600b-4a8b-97c6-ef649952eebf, infoPort=44183, infoSecurePort=0, ipcPort=40201, storageInfo=lv=-57;cid=testClusterID;nsid=630606353;c=1733173762666) 2024-12-02T21:09:23,970 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa17459c78bd9b6e3 with lease ID 0x7c8b4e6cc8a01644: from storage DS-a09a5dec-f5bf-45ab-bc95-4ef2c766b66a node DatanodeRegistration(127.0.0.1:46749, datanodeUuid=71ceb9c5-600b-4a8b-97c6-ef649952eebf, infoPort=44183, infoSecurePort=0, ipcPort=40201, storageInfo=lv=-57;cid=testClusterID;nsid=630606353;c=1733173762666), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:09:23,971 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa17459c78bd9b6e3 with lease ID 0x7c8b4e6cc8a01644: Processing first storage report for DS-0cc2c10b-820b-4fc4-8adb-2ef253f1b358 from datanode DatanodeRegistration(127.0.0.1:46749, datanodeUuid=71ceb9c5-600b-4a8b-97c6-ef649952eebf, infoPort=44183, infoSecurePort=0, ipcPort=40201, storageInfo=lv=-57;cid=testClusterID;nsid=630606353;c=1733173762666) 2024-12-02T21:09:23,971 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa17459c78bd9b6e3 with lease ID 0x7c8b4e6cc8a01644: from storage DS-0cc2c10b-820b-4fc4-8adb-2ef253f1b358 node DatanodeRegistration(127.0.0.1:46749, datanodeUuid=71ceb9c5-600b-4a8b-97c6-ef649952eebf, infoPort=44183, infoSecurePort=0, ipcPort=40201, storageInfo=lv=-57;cid=testClusterID;nsid=630606353;c=1733173762666), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:09:24,008 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-02T21:09:24,008 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-02T21:09:24,035 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1 2024-12-02T21:09:24,038 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/cluster_a36dc0da-24c9-4660-2142-0a0442b4566b/zookeeper_0, clientPort=57382, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/cluster_a36dc0da-24c9-4660-2142-0a0442b4566b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/cluster_a36dc0da-24c9-4660-2142-0a0442b4566b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, 
clientPortListenBacklog=-1, serverId=0 2024-12-02T21:09:24,039 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=57382 2024-12-02T21:09:24,039 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:09:24,041 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:09:24,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741825_1001 (size=7) 2024-12-02T21:09:24,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46749 is added to blk_1073741825_1001 (size=7) 2024-12-02T21:09:24,055 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e with version=8 2024-12-02T21:09:24,055 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/hbase-staging 2024-12-02T21:09:24,057 INFO [Time-limited test {}] client.ConnectionUtils(129): master/7d4f3b9a7081:0 server-side Connection retries=45 2024-12-02T21:09:24,057 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:09:24,057 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T21:09:24,057 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T21:09:24,057 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:09:24,057 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T21:09:24,057 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T21:09:24,057 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T21:09:24,058 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:34867 2024-12-02T21:09:24,059 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:09:24,060 INFO [Time-limited test {}] fs.HFileSystem(339): Added 
intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:09:24,062 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:34867 connecting to ZooKeeper ensemble=127.0.0.1:57382 2024-12-02T21:09:24,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:348670x0, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T21:09:24,119 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34867-0x101992a823a0000 connected 2024-12-02T21:09:24,178 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T21:09:24,179 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:09:24,179 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T21:09:24,180 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34867 2024-12-02T21:09:24,180 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34867 2024-12-02T21:09:24,180 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34867 2024-12-02T21:09:24,181 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34867 2024-12-02T21:09:24,181 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34867 2024-12-02T21:09:24,181 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e, hbase.cluster.distributed=false 2024-12-02T21:09:24,195 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/7d4f3b9a7081:0 server-side Connection retries=45 2024-12-02T21:09:24,195 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:09:24,195 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T21:09:24,196 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T21:09:24,196 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:09:24,196 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 
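The RpcExecutor entries on either side of this point (default.FPBQ.Fifo, priority.RWQ.Fifo, replication.FPBQ.Fifo and metaPriority.FPBQ.Fifo, each reporting a handlerCount and maxQueueLength) are sized from ordinary HBase configuration. As a rough, hypothetical sketch — the property names are the standard ones, but the values and wiring are illustrative, not the exact settings this mini cluster uses — the knobs behind those numbers look like this:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcHandlerSizingSketch {
  public static void main(String[] args) {
    // Hypothetical sketch: stock properties behind the RpcExecutor
    // "handlerCount" / "maxQueueLength" values reported in the log.
    // The numbers are examples, not this test's actual configuration.
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.regionserver.handler.count", 3);           // RPC call handlers per server
    conf.setInt("hbase.ipc.server.max.callqueue.length", 30);     // cap on each call queue
    conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f); // split handlers into read/write pools
    System.out.println("handlers=" + conf.getInt("hbase.regionserver.handler.count", 30)
        + " queueLength=" + conf.getInt("hbase.ipc.server.max.callqueue.length", 300));
  }
}
```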
2024-12-02T21:09:24,196 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T21:09:24,196 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T21:09:24,196 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:46151 2024-12-02T21:09:24,197 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T21:09:24,198 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T21:09:24,199 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:09:24,200 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:09:24,202 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:46151 connecting to ZooKeeper ensemble=127.0.0.1:57382 2024-12-02T21:09:24,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:461510x0, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T21:09:24,211 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46151-0x101992a823a0001 connected 2024-12-02T21:09:24,211 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46151-0x101992a823a0001, quorum=127.0.0.1:57382, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T21:09:24,212 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46151-0x101992a823a0001, quorum=127.0.0.1:57382, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:09:24,213 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46151-0x101992a823a0001, quorum=127.0.0.1:57382, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T21:09:24,213 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46151 2024-12-02T21:09:24,213 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46151 2024-12-02T21:09:24,214 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46151 2024-12-02T21:09:24,214 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46151 2024-12-02T21:09:24,214 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46151 2024-12-02T21:09:24,215 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/7d4f3b9a7081,34867,1733173764056 2024-12-02T21:09:24,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:09:24,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46151-0x101992a823a0001, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:09:24,220 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7d4f3b9a7081,34867,1733173764056 2024-12-02T21:09:24,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46151-0x101992a823a0001, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T21:09:24,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T21:09:24,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46151-0x101992a823a0001, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:09:24,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:09:24,228 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T21:09:24,229 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7d4f3b9a7081,34867,1733173764056 from backup master directory 2024-12-02T21:09:24,229 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T21:09:24,230 DEBUG [M:0;7d4f3b9a7081:34867 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7d4f3b9a7081:34867 2024-12-02T21:09:24,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7d4f3b9a7081,34867,1733173764056 2024-12-02T21:09:24,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46151-0x101992a823a0001, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:09:24,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:09:24,236 WARN [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
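The ZKWatcher lines in this stretch show the master and region server receiving NodeCreated, NodeDeleted and NodeChildrenChanged events for znodes such as /hbase/master and /hbase/backup-masters during master election. A minimal, hypothetical sketch of the same watch mechanism with the plain ZooKeeper client API (the connect string only mirrors the log's 127.0.0.1:57382 ensemble) would be:

```java
import org.apache.zookeeper.ZooKeeper;

public class ZkWatchSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical sketch: a session watcher printing the same event types
    // (None/SyncConnected, NodeCreated, NodeDeleted, NodeChildrenChanged)
    // that the ZKWatcher log lines above report.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:57382", 30000,
        event -> System.out.println("type=" + event.getType() + " path=" + event.getPath()));
    // exists() with watch=true registers interest in /hbase/master, the znode
    // whose creation the election sequence in the log is waiting on.
    zk.exists("/hbase/master", true);
    Thread.sleep(5000); // give events a moment to arrive in this toy example
    zk.close();
  }
}
```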
2024-12-02T21:09:24,236 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7d4f3b9a7081,34867,1733173764056 2024-12-02T21:09:24,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46749 is added to blk_1073741826_1002 (size=42) 2024-12-02T21:09:24,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741826_1002 (size=42) 2024-12-02T21:09:24,246 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/hbase.id with ID: fb137951-d5f1-4a04-bddc-e180a9ce3020 2024-12-02T21:09:24,255 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:09:24,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46151-0x101992a823a0001, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:09:24,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:09:24,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741827_1003 (size=196) 2024-12-02T21:09:24,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46749 is added to blk_1073741827_1003 (size=196) 2024-12-02T21:09:24,267 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T21:09:24,268 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, 
flushIntervalMs=900000 2024-12-02T21:09:24,268 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:09:24,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741828_1004 (size=1189) 2024-12-02T21:09:24,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46749 is added to blk_1073741828_1004 (size=1189) 2024-12-02T21:09:24,276 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/data/master/store 2024-12-02T21:09:24,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741829_1005 (size=34) 2024-12-02T21:09:24,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46749 is added to blk_1073741829_1005 (size=34) 2024-12-02T21:09:24,283 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:09:24,284 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T21:09:24,284 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:09:24,284 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
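The MasterRegion entry above prints the full descriptor of the local 'master:store' table (column families info, proc, rs and state). As an illustration of how that descriptor maps onto the public client API — a hedged reconstruction of the logged 'info' family, not the code MasterRegion itself runs — it could be expressed as:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static void main(String[] args) {
    // Illustrative reconstruction of the 'info' family exactly as the
    // MasterRegion log line prints it: VERSIONS=3, IN_MEMORY=true,
    // ROW_INDEX_V1 encoding, ROWCOL bloom filter, 8 KB blocks.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setBlocksize(8 * 1024)
        .build();
    TableDescriptor store = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info)
        .build();
    System.out.println(store);
  }
}
```

The remaining families (proc, rs, state) differ only in VERSIONS=1, default encoding, a ROW bloom filter and 64 KB blocks, per the same log line.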
2024-12-02T21:09:24,284 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T21:09:24,284 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:09:24,284 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:09:24,284 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-02T21:09:24,284 WARN [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/data/master/store/.initializing 2024-12-02T21:09:24,285 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/WALs/7d4f3b9a7081,34867,1733173764056 2024-12-02T21:09:24,287 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7d4f3b9a7081%2C34867%2C1733173764056, suffix=, logDir=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/WALs/7d4f3b9a7081,34867,1733173764056, archiveDir=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/oldWALs, maxLogs=10 2024-12-02T21:09:24,288 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C34867%2C1733173764056.1733173764288 2024-12-02T21:09:24,292 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/WALs/7d4f3b9a7081,34867,1733173764056/7d4f3b9a7081%2C34867%2C1733173764056.1733173764288 2024-12-02T21:09:24,292 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40199:40199),(127.0.0.1/127.0.0.1:44183:44183)] 2024-12-02T21:09:24,292 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:09:24,292 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:09:24,293 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:09:24,293 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:09:24,294 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 
1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:09:24,296 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-02T21:09:24,296 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:09:24,297 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:09:24,297 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:09:24,298 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-02T21:09:24,298 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:09:24,299 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:09:24,299 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:09:24,300 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-02T21:09:24,300 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:09:24,300 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:09:24,301 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:09:24,302 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-02T21:09:24,302 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:09:24,302 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:09:24,303 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:09:24,303 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:09:24,306 DEBUG 
[master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T21:09:24,307 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:09:24,309 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:09:24,309 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=740773, jitterRate=-0.05805906653404236}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T21:09:24,310 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-02T21:09:24,310 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-02T21:09:24,315 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56326477, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:09:24,316 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-02T21:09:24,316 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-02T21:09:24,316 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-02T21:09:24,316 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-02T21:09:24,317 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec 2024-12-02T21:09:24,317 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-12-02T21:09:24,317 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-02T21:09:24,319 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-02T21:09:24,320 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-02T21:09:24,327 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-02T21:09:24,328 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-02T21:09:24,328 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-02T21:09:24,336 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-02T21:09:24,336 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-02T21:09:24,337 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-02T21:09:24,344 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-02T21:09:24,345 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-02T21:09:24,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:24,352 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-02T21:09:24,355 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-02T21:09:24,361 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-02T21:09:24,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T21:09:24,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46151-0x101992a823a0001, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T21:09:24,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:09:24,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46151-0x101992a823a0001, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:09:24,370 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=7d4f3b9a7081,34867,1733173764056, sessionid=0x101992a823a0000, setting cluster-up flag (Was=false) 2024-12-02T21:09:24,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46151-0x101992a823a0001, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:09:24,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:09:24,444 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-02T21:09:24,445 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7d4f3b9a7081,34867,1733173764056 2024-12-02T21:09:24,477 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46151-0x101992a823a0001, quorum=127.0.0.1:57382, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:09:24,477 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:09:24,527 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-02T21:09:24,530 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7d4f3b9a7081,34867,1733173764056 2024-12-02T21:09:24,536 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-02T21:09:24,536 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-02T21:09:24,537 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-02T21:09:24,537 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7d4f3b9a7081,34867,1733173764056 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-02T21:09:24,537 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7d4f3b9a7081:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:09:24,537 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7d4f3b9a7081:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:09:24,537 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:09:24,538 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:09:24,538 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7d4f3b9a7081:0, corePoolSize=10, maxPoolSize=10 2024-12-02T21:09:24,538 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:09:24,538 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_MERGE_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=2, maxPoolSize=2 2024-12-02T21:09:24,538 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:09:24,540 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733173794540 2024-12-02T21:09:24,540 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-02T21:09:24,540 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-02T21:09:24,540 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-02T21:09:24,540 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-02T21:09:24,540 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:09:24,541 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-02T21:09:24,541 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-02T21:09:24,541 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-02T21:09:24,541 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-02T21:09:24,543 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-02T21:09:24,543 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:09:24,543 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-02T21:09:24,543 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-02T21:09:24,543 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T21:09:24,544 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-02T21:09:24,544 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-02T21:09:24,544 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.large.0-1733173764544,5,FailOnTimeoutGroup] 2024-12-02T21:09:24,545 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.small.0-1733173764544,5,FailOnTimeoutGroup] 2024-12-02T21:09:24,545 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T21:09:24,546 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-02T21:09:24,546 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 
2024-12-02T21:09:24,546 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-02T21:09:24,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46749 is added to blk_1073741831_1007 (size=1039) 2024-12-02T21:09:24,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741831_1007 (size=1039) 2024-12-02T21:09:24,554 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-02T21:09:24,554 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e 2024-12-02T21:09:24,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741832_1008 (size=32) 2024-12-02T21:09:24,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46749 is added to blk_1073741832_1008 (size=32) 2024-12-02T21:09:24,563 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:09:24,566 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T21:09:24,567 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; 
tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T21:09:24,567 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:09:24,567 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:09:24,567 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T21:09:24,569 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T21:09:24,569 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:09:24,569 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:09:24,569 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T21:09:24,570 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T21:09:24,570 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:09:24,571 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:09:24,572 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/hbase/meta/1588230740 2024-12-02T21:09:24,572 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/hbase/meta/1588230740 2024-12-02T21:09:24,574 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T21:09:24,575 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-02T21:09:24,577 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:09:24,578 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=698648, jitterRate=-0.11162367463111877}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T21:09:24,578 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-02T21:09:24,578 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-02T21:09:24,578 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-02T21:09:24,578 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-02T21:09:24,578 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T21:09:24,578 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T21:09:24,578 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-02T21:09:24,578 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-02T21:09:24,579 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:09:24,579 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-02T21:09:24,579 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-02T21:09:24,580 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, 
region=1588230740, ASSIGN 2024-12-02T21:09:24,581 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-02T21:09:24,631 DEBUG [RS:0;7d4f3b9a7081:46151 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7d4f3b9a7081:46151 2024-12-02T21:09:24,632 INFO [RS:0;7d4f3b9a7081:46151 {}] regionserver.HRegionServer(1008): ClusterId : fb137951-d5f1-4a04-bddc-e180a9ce3020 2024-12-02T21:09:24,632 DEBUG [RS:0;7d4f3b9a7081:46151 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T21:09:24,637 DEBUG [RS:0;7d4f3b9a7081:46151 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T21:09:24,637 DEBUG [RS:0;7d4f3b9a7081:46151 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T21:09:24,645 DEBUG [RS:0;7d4f3b9a7081:46151 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T21:09:24,645 DEBUG [RS:0;7d4f3b9a7081:46151 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ed935f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:09:24,646 DEBUG [RS:0;7d4f3b9a7081:46151 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b318fb9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7d4f3b9a7081/172.17.0.2:0 2024-12-02T21:09:24,646 INFO [RS:0;7d4f3b9a7081:46151 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-02T21:09:24,646 INFO [RS:0;7d4f3b9a7081:46151 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-02T21:09:24,646 DEBUG [RS:0;7d4f3b9a7081:46151 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-02T21:09:24,647 INFO [RS:0;7d4f3b9a7081:46151 {}] regionserver.HRegionServer(3073): reportForDuty to master=7d4f3b9a7081,34867,1733173764056 with isa=7d4f3b9a7081/172.17.0.2:46151, startcode=1733173764195 2024-12-02T21:09:24,647 DEBUG [RS:0;7d4f3b9a7081:46151 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T21:09:24,649 INFO [RS-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33709, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T21:09:24,650 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34867 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 7d4f3b9a7081,46151,1733173764195 2024-12-02T21:09:24,650 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34867 {}] master.ServerManager(486): Registering regionserver=7d4f3b9a7081,46151,1733173764195 2024-12-02T21:09:24,651 DEBUG [RS:0;7d4f3b9a7081:46151 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e 2024-12-02T21:09:24,651 DEBUG [RS:0;7d4f3b9a7081:46151 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:35937 2024-12-02T21:09:24,651 DEBUG [RS:0;7d4f3b9a7081:46151 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-02T21:09:24,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T21:09:24,661 DEBUG [RS:0;7d4f3b9a7081:46151 {}] zookeeper.ZKUtil(111): regionserver:46151-0x101992a823a0001, quorum=127.0.0.1:57382, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7d4f3b9a7081,46151,1733173764195 2024-12-02T21:09:24,661 WARN [RS:0;7d4f3b9a7081:46151 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-02T21:09:24,662 INFO [RS:0;7d4f3b9a7081:46151 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:09:24,662 DEBUG [RS:0;7d4f3b9a7081:46151 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195 2024-12-02T21:09:24,662 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7d4f3b9a7081,46151,1733173764195] 2024-12-02T21:09:24,667 DEBUG [RS:0;7d4f3b9a7081:46151 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-02T21:09:24,667 INFO [RS:0;7d4f3b9a7081:46151 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T21:09:24,669 INFO [RS:0;7d4f3b9a7081:46151 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T21:09:24,670 INFO [RS:0;7d4f3b9a7081:46151 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T21:09:24,670 INFO [RS:0;7d4f3b9a7081:46151 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:09:24,670 INFO [RS:0;7d4f3b9a7081:46151 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-02T21:09:24,671 INFO [RS:0;7d4f3b9a7081:46151 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-02T21:09:24,671 DEBUG [RS:0;7d4f3b9a7081:46151 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:09:24,671 DEBUG [RS:0;7d4f3b9a7081:46151 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:09:24,671 DEBUG [RS:0;7d4f3b9a7081:46151 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:09:24,671 DEBUG [RS:0;7d4f3b9a7081:46151 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:09:24,672 DEBUG [RS:0;7d4f3b9a7081:46151 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:09:24,672 DEBUG [RS:0;7d4f3b9a7081:46151 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7d4f3b9a7081:0, corePoolSize=2, maxPoolSize=2 2024-12-02T21:09:24,672 DEBUG [RS:0;7d4f3b9a7081:46151 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:09:24,672 DEBUG [RS:0;7d4f3b9a7081:46151 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:09:24,672 DEBUG [RS:0;7d4f3b9a7081:46151 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:09:24,672 DEBUG [RS:0;7d4f3b9a7081:46151 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:09:24,672 DEBUG [RS:0;7d4f3b9a7081:46151 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:09:24,672 DEBUG [RS:0;7d4f3b9a7081:46151 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7d4f3b9a7081:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:09:24,672 DEBUG [RS:0;7d4f3b9a7081:46151 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:09:24,673 INFO [RS:0;7d4f3b9a7081:46151 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T21:09:24,673 INFO [RS:0;7d4f3b9a7081:46151 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T21:09:24,673 INFO [RS:0;7d4f3b9a7081:46151 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T21:09:24,673 INFO [RS:0;7d4f3b9a7081:46151 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T21:09:24,673 INFO [RS:0;7d4f3b9a7081:46151 {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,46151,1733173764195-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-02T21:09:24,686 INFO [RS:0;7d4f3b9a7081:46151 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T21:09:24,686 INFO [RS:0;7d4f3b9a7081:46151 {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,46151,1733173764195-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:09:24,697 INFO [RS:0;7d4f3b9a7081:46151 {}] regionserver.Replication(204): 7d4f3b9a7081,46151,1733173764195 started 2024-12-02T21:09:24,697 INFO [RS:0;7d4f3b9a7081:46151 {}] regionserver.HRegionServer(1767): Serving as 7d4f3b9a7081,46151,1733173764195, RpcServer on 7d4f3b9a7081/172.17.0.2:46151, sessionid=0x101992a823a0001 2024-12-02T21:09:24,697 DEBUG [RS:0;7d4f3b9a7081:46151 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T21:09:24,697 DEBUG [RS:0;7d4f3b9a7081:46151 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7d4f3b9a7081,46151,1733173764195 2024-12-02T21:09:24,697 DEBUG [RS:0;7d4f3b9a7081:46151 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7d4f3b9a7081,46151,1733173764195' 2024-12-02T21:09:24,697 DEBUG [RS:0;7d4f3b9a7081:46151 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T21:09:24,698 DEBUG [RS:0;7d4f3b9a7081:46151 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T21:09:24,698 DEBUG [RS:0;7d4f3b9a7081:46151 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T21:09:24,698 DEBUG [RS:0;7d4f3b9a7081:46151 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T21:09:24,698 DEBUG [RS:0;7d4f3b9a7081:46151 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7d4f3b9a7081,46151,1733173764195 2024-12-02T21:09:24,698 DEBUG [RS:0;7d4f3b9a7081:46151 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7d4f3b9a7081,46151,1733173764195' 2024-12-02T21:09:24,698 DEBUG [RS:0;7d4f3b9a7081:46151 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T21:09:24,699 DEBUG [RS:0;7d4f3b9a7081:46151 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T21:09:24,699 DEBUG [RS:0;7d4f3b9a7081:46151 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T21:09:24,699 INFO [RS:0;7d4f3b9a7081:46151 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T21:09:24,699 INFO [RS:0;7d4f3b9a7081:46151 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-02T21:09:24,731 WARN [7d4f3b9a7081:34867 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
2024-12-02T21:09:24,803 INFO [RS:0;7d4f3b9a7081:46151 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7d4f3b9a7081%2C46151%2C1733173764195, suffix=, logDir=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195, archiveDir=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/oldWALs, maxLogs=32 2024-12-02T21:09:24,806 INFO [RS:0;7d4f3b9a7081:46151 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C46151%2C1733173764195.1733173764805 2024-12-02T21:09:24,815 INFO [RS:0;7d4f3b9a7081:46151 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173764805 2024-12-02T21:09:24,815 DEBUG [RS:0;7d4f3b9a7081:46151 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44183:44183),(127.0.0.1/127.0.0.1:40199:40199)] 2024-12-02T21:09:24,982 DEBUG [7d4f3b9a7081:34867 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-02T21:09:24,983 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7d4f3b9a7081,46151,1733173764195 2024-12-02T21:09:24,986 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7d4f3b9a7081,46151,1733173764195, state=OPENING 2024-12-02T21:09:25,020 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-02T21:09:25,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:09:25,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46151-0x101992a823a0001, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:09:25,030 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=7d4f3b9a7081,46151,1733173764195}] 2024-12-02T21:09:25,030 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:09:25,030 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:09:25,185 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7d4f3b9a7081,46151,1733173764195 2024-12-02T21:09:25,185 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T21:09:25,190 INFO [RS-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33820, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T21:09:25,195 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-02T21:09:25,195 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating 
WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:09:25,198 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7d4f3b9a7081%2C46151%2C1733173764195.meta, suffix=.meta, logDir=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195, archiveDir=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/oldWALs, maxLogs=32 2024-12-02T21:09:25,200 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C46151%2C1733173764195.meta.1733173765199.meta 2024-12-02T21:09:25,207 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.meta.1733173765199.meta 2024-12-02T21:09:25,207 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40199:40199),(127.0.0.1/127.0.0.1:44183:44183)] 2024-12-02T21:09:25,207 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:09:25,208 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-02T21:09:25,208 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-02T21:09:25,208 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-02T21:09:25,208 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-02T21:09:25,208 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:09:25,208 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-02T21:09:25,208 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-02T21:09:25,210 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T21:09:25,211 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T21:09:25,211 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:09:25,211 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:09:25,211 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T21:09:25,212 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T21:09:25,212 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:09:25,213 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:09:25,213 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T21:09:25,214 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T21:09:25,214 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:09:25,214 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:09:25,215 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/hbase/meta/1588230740 2024-12-02T21:09:25,216 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/hbase/meta/1588230740 2024-12-02T21:09:25,217 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
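Note: the FlushLargeStoresPolicy line above falls back to the region's memstore flush size divided by the number of column families because hbase:meta sets no per-family lower bound. A tiny sketch of that quotient; the 48 MB flush size is an inference from the printed 16.0 M result and meta's three families (info, rep_barrier, table), not a value stated in the log.

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        int families = 3;                           // hbase:meta: info, rep_barrier, table
        long memstoreFlushSize = 48L * 1024 * 1024; // assumed, inferred from the logged 16 MB result
        long lowerBound = memstoreFlushSize / families;
        System.out.println(lowerBound);             // 16777216, the flushSizeLowerBound logged just below
      }
    }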
2024-12-02T21:09:25,218 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-02T21:09:25,218 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=821188, jitterRate=0.044195279479026794}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T21:09:25,219 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-02T21:09:25,219 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733173765185 2024-12-02T21:09:25,221 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-02T21:09:25,221 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7d4f3b9a7081,46151,1733173764195 2024-12-02T21:09:25,222 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-02T21:09:25,222 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7d4f3b9a7081,46151,1733173764195, state=OPEN 2024-12-02T21:09:25,269 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T21:09:25,269 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46151-0x101992a823a0001, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T21:09:25,269 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:09:25,269 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:09:25,273 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-02T21:09:25,273 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=7d4f3b9a7081,46151,1733173764195 in 239 msec 2024-12-02T21:09:25,277 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-02T21:09:25,277 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 694 msec 2024-12-02T21:09:25,279 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 745 msec 2024-12-02T21:09:25,280 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers 
to report in: status=status unset, state=RUNNING, startTime=1733173765280, completionTime=-1 2024-12-02T21:09:25,280 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-02T21:09:25,280 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-02T21:09:25,281 DEBUG [hconnection-0x5ee1e0d-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T21:09:25,282 INFO [RS-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33822, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T21:09:25,283 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-02T21:09:25,283 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733173825283 2024-12-02T21:09:25,283 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733173885283 2024-12-02T21:09:25,283 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 3 msec 2024-12-02T21:09:25,303 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,34867,1733173764056-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:09:25,303 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,34867,1733173764056-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:09:25,303 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,34867,1733173764056-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:09:25,303 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7d4f3b9a7081:34867, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:09:25,303 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-02T21:09:25,303 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
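Note: the ChoreService lines above register periodic maintenance tasks (balancer, region normalizer, catalog janitor, hbck) each with a fixed period. A rough stand-in for that scheduling pattern using a plain ScheduledExecutorService; this only mimics the shape of a chore and is not the HBase ChoreService/ScheduledChore API.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
      public static void main(String[] args) {
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
        // Analogous to "BalancerChore, period=300000, unit=MILLISECONDS is enabled." above.
        pool.scheduleAtFixedRate(() -> System.out.println("balancer pass"),
            0, 300_000, TimeUnit.MILLISECONDS);
      }
    }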
2024-12-02T21:09:25,304 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T21:09:25,305 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-02T21:09:25,305 DEBUG [master/7d4f3b9a7081:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-02T21:09:25,306 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T21:09:25,306 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:09:25,307 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T21:09:25,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46749 is added to blk_1073741835_1011 (size=358) 2024-12-02T21:09:25,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741835_1011 (size=358) 2024-12-02T21:09:25,314 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 16c341a4ab260c1acb120ac1a558413b, NAME => 'hbase:namespace,,1733173765303.16c341a4ab260c1acb120ac1a558413b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e 2024-12-02T21:09:25,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741836_1012 (size=42) 2024-12-02T21:09:25,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46749 is added to blk_1073741836_1012 (size=42) 2024-12-02T21:09:25,320 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733173765303.16c341a4ab260c1acb120ac1a558413b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:09:25,320 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 16c341a4ab260c1acb120ac1a558413b, disabling compactions & flushes 2024-12-02T21:09:25,321 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region 
hbase:namespace,,1733173765303.16c341a4ab260c1acb120ac1a558413b. 2024-12-02T21:09:25,321 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733173765303.16c341a4ab260c1acb120ac1a558413b. 2024-12-02T21:09:25,321 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733173765303.16c341a4ab260c1acb120ac1a558413b. after waiting 0 ms 2024-12-02T21:09:25,321 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733173765303.16c341a4ab260c1acb120ac1a558413b. 2024-12-02T21:09:25,321 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733173765303.16c341a4ab260c1acb120ac1a558413b. 2024-12-02T21:09:25,321 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 16c341a4ab260c1acb120ac1a558413b: 2024-12-02T21:09:25,322 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T21:09:25,322 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733173765303.16c341a4ab260c1acb120ac1a558413b.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733173765322"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733173765322"}]},"ts":"1733173765322"} 2024-12-02T21:09:25,324 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-02T21:09:25,325 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T21:09:25,325 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733173765325"}]},"ts":"1733173765325"} 2024-12-02T21:09:25,327 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-02T21:09:25,344 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=16c341a4ab260c1acb120ac1a558413b, ASSIGN}] 2024-12-02T21:09:25,346 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=16c341a4ab260c1acb120ac1a558413b, ASSIGN 2024-12-02T21:09:25,348 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=16c341a4ab260c1acb120ac1a558413b, ASSIGN; state=OFFLINE, location=7d4f3b9a7081,46151,1733173764195; forceNewPlan=false, retain=false 2024-12-02T21:09:25,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta 
java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:25,498 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=16c341a4ab260c1acb120ac1a558413b, regionState=OPENING, regionLocation=7d4f3b9a7081,46151,1733173764195 2024-12-02T21:09:25,501 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 16c341a4ab260c1acb120ac1a558413b, server=7d4f3b9a7081,46151,1733173764195}] 2024-12-02T21:09:25,654 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7d4f3b9a7081,46151,1733173764195 2024-12-02T21:09:25,659 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733173765303.16c341a4ab260c1acb120ac1a558413b. 
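Note: the WARN and stack trace above show RecoverLeaseFSUtils probing DistributedFileSystem.isFileClosed through reflection and the call failing because the underlying DFSClient has already been closed ("java.io.IOException: Filesystem closed"). A stripped-down sketch of that probe, following the shape of the stack trace rather than the actual RecoverLeaseFSUtils code.

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class IsFileClosedSketch {
      // Returns true only if the filesystem confirms the file is closed; any reflective failure
      // (such as the "Filesystem closed" cause above) is treated as "not yet closed", and the
      // caller retries, which matches the roughly one-second spacing of the repeated WARNs.
      static boolean isFileClosed(FileSystem fs, Path p) {
        try {
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, p);
        } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) {
          return false;
        }
      }
    }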
2024-12-02T21:09:25,659 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 16c341a4ab260c1acb120ac1a558413b, NAME => 'hbase:namespace,,1733173765303.16c341a4ab260c1acb120ac1a558413b.', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:09:25,659 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 16c341a4ab260c1acb120ac1a558413b 2024-12-02T21:09:25,659 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733173765303.16c341a4ab260c1acb120ac1a558413b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:09:25,660 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 16c341a4ab260c1acb120ac1a558413b 2024-12-02T21:09:25,660 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 16c341a4ab260c1acb120ac1a558413b 2024-12-02T21:09:25,662 INFO [StoreOpener-16c341a4ab260c1acb120ac1a558413b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 16c341a4ab260c1acb120ac1a558413b 2024-12-02T21:09:25,665 INFO [StoreOpener-16c341a4ab260c1acb120ac1a558413b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 16c341a4ab260c1acb120ac1a558413b columnFamilyName info 2024-12-02T21:09:25,665 DEBUG [StoreOpener-16c341a4ab260c1acb120ac1a558413b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:09:25,666 INFO [StoreOpener-16c341a4ab260c1acb120ac1a558413b-1 {}] regionserver.HStore(327): Store=16c341a4ab260c1acb120ac1a558413b/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:09:25,667 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/hbase/namespace/16c341a4ab260c1acb120ac1a558413b 2024-12-02T21:09:25,668 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] 
regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/hbase/namespace/16c341a4ab260c1acb120ac1a558413b 2024-12-02T21:09:25,671 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 16c341a4ab260c1acb120ac1a558413b 2024-12-02T21:09:25,674 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/hbase/namespace/16c341a4ab260c1acb120ac1a558413b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:09:25,675 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 16c341a4ab260c1acb120ac1a558413b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=764620, jitterRate=-0.027736514806747437}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T21:09:25,675 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 16c341a4ab260c1acb120ac1a558413b: 2024-12-02T21:09:25,677 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733173765303.16c341a4ab260c1acb120ac1a558413b., pid=6, masterSystemTime=1733173765654 2024-12-02T21:09:25,680 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733173765303.16c341a4ab260c1acb120ac1a558413b. 2024-12-02T21:09:25,680 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733173765303.16c341a4ab260c1acb120ac1a558413b. 
2024-12-02T21:09:25,681 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=16c341a4ab260c1acb120ac1a558413b, regionState=OPEN, openSeqNum=2, regionLocation=7d4f3b9a7081,46151,1733173764195 2024-12-02T21:09:25,686 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-02T21:09:25,687 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 16c341a4ab260c1acb120ac1a558413b, server=7d4f3b9a7081,46151,1733173764195 in 182 msec 2024-12-02T21:09:25,689 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-02T21:09:25,689 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=16c341a4ab260c1acb120ac1a558413b, ASSIGN in 342 msec 2024-12-02T21:09:25,690 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T21:09:25,690 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733173765690"}]},"ts":"1733173765690"} 2024-12-02T21:09:25,691 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-02T21:09:25,704 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T21:09:25,706 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-02T21:09:25,706 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 400 msec 2024-12-02T21:09:25,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46151-0x101992a823a0001, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:09:25,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-02T21:09:25,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:09:25,716 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-02T21:09:25,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-02T21:09:25,746 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 30 msec 2024-12-02T21:09:25,749 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-02T21:09:25,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-02T21:09:25,789 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 39 msec 2024-12-02T21:09:25,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-02T21:09:25,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-02T21:09:25,828 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.592sec 2024-12-02T21:09:25,828 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-02T21:09:25,828 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-02T21:09:25,828 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-02T21:09:25,828 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-02T21:09:25,828 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-02T21:09:25,828 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,34867,1733173764056-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T21:09:25,828 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,34867,1733173764056-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-02T21:09:25,831 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-02T21:09:25,831 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-02T21:09:25,831 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,34867,1733173764056-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-02T21:09:25,920 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0592832b to 127.0.0.1:57382 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@43475e30 2024-12-02T21:09:25,929 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@754c8726, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:09:25,931 DEBUG [hconnection-0xc03e161-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T21:09:25,933 INFO [RS-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33838, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T21:09:25,935 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=7d4f3b9a7081,34867,1733173764056 2024-12-02T21:09:25,936 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:09:25,939 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-02T21:09:25,940 INFO [Time-limited test {}] wal.TestLogRolling(297): Starting testLogRollOnPipelineRestart 2024-12-02T21:09:25,940 INFO [Time-limited test {}] wal.TestLogRolling(300): Replication=2 2024-12-02T21:09:25,941 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T21:09:25,943 INFO [RS-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34808, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T21:09:25,944 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34867 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-02T21:09:25,944 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34867 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
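Note: the two TableDescriptorChecker warnings above are expected here; the log-rolling test deliberately shrinks the region max file size and memstore flush size so that splits and flushes happen quickly. A sketch of setting those values on a Configuration; the keys are the ones quoted in the warnings, but wiring them in this way is an assumption about the test setup rather than the test's actual code.

    import org.apache.hadoop.conf.Configuration;

    public class TinySizesSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Exactly the values flagged as "too small" in the warnings above.
        conf.setLong("hbase.hregion.max.filesize", 786432);       // ~768 KB, forces early splits
        conf.setLong("hbase.hregion.memstore.flush.size", 8192);  // 8 KB, forces very frequent flushes
        System.out.println(conf.get("hbase.hregion.max.filesize"));
      }
    }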
2024-12-02T21:09:25,944 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34867 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T21:09:25,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34867 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-12-02T21:09:25,947 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T21:09:25,947 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:09:25,947 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34867 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 9 2024-12-02T21:09:25,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34867 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-02T21:09:25,949 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T21:09:25,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741837_1013 (size=395) 2024-12-02T21:09:25,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46749 is added to blk_1073741837_1013 (size=395) 2024-12-02T21:09:25,958 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 6b26b280a8a37661df64b9f3e64f75d5, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733173765944.6b26b280a8a37661df64b9f3e64f75d5.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e 2024-12-02T21:09:25,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44711 is added to blk_1073741838_1014 (size=78) 2024-12-02T21:09:25,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46749 is added to blk_1073741838_1014 (size=78) 2024-12-02T21:09:25,965 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733173765944.6b26b280a8a37661df64b9f3e64f75d5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:09:25,965 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1681): Closing 6b26b280a8a37661df64b9f3e64f75d5, disabling compactions & flushes 2024-12-02T21:09:25,965 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733173765944.6b26b280a8a37661df64b9f3e64f75d5. 2024-12-02T21:09:25,965 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733173765944.6b26b280a8a37661df64b9f3e64f75d5. 2024-12-02T21:09:25,965 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733173765944.6b26b280a8a37661df64b9f3e64f75d5. after waiting 0 ms 2024-12-02T21:09:25,965 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733173765944.6b26b280a8a37661df64b9f3e64f75d5. 2024-12-02T21:09:25,965 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733173765944.6b26b280a8a37661df64b9f3e64f75d5. 2024-12-02T21:09:25,966 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1635): Region close journal for 6b26b280a8a37661df64b9f3e64f75d5: 2024-12-02T21:09:25,967 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T21:09:25,967 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1733173765944.6b26b280a8a37661df64b9f3e64f75d5.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1733173765967"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733173765967"}]},"ts":"1733173765967"} 2024-12-02T21:09:25,969 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
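Note: the create request logged a little above describes a single family 'info' with VERSIONS => '1', BLOOMFILTER => 'ROW' and a 64 KB block size. A sketch of building an equivalent descriptor with the HBase 2.x client API; the connection setup is assumed and only the attributes visible in the log are set, so this is an illustration rather than the test's own code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)                 // VERSIONS => '1'
                  .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
                  .setBlocksize(65536)               // BLOCKSIZE => '65536 B (64KB)'
                  .build())
              .build();
          admin.createTable(td);
        }
      }
    }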
2024-12-02T21:09:25,969 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T21:09:25,970 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733173765969"}]},"ts":"1733173765969"} 2024-12-02T21:09:25,971 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-12-02T21:09:25,986 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=6b26b280a8a37661df64b9f3e64f75d5, ASSIGN}] 2024-12-02T21:09:25,987 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=6b26b280a8a37661df64b9f3e64f75d5, ASSIGN 2024-12-02T21:09:25,988 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=6b26b280a8a37661df64b9f3e64f75d5, ASSIGN; state=OFFLINE, location=7d4f3b9a7081,46151,1733173764195; forceNewPlan=false, retain=false 2024-12-02T21:09:26,139 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=6b26b280a8a37661df64b9f3e64f75d5, regionState=OPENING, regionLocation=7d4f3b9a7081,46151,1733173764195 2024-12-02T21:09:26,143 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 6b26b280a8a37661df64b9f3e64f75d5, server=7d4f3b9a7081,46151,1733173764195}] 2024-12-02T21:09:26,299 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7d4f3b9a7081,46151,1733173764195 2024-12-02T21:09:26,304 INFO [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRollOnPipelineRestart,,1733173765944.6b26b280a8a37661df64b9f3e64f75d5. 
2024-12-02T21:09:26,304 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 6b26b280a8a37661df64b9f3e64f75d5, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733173765944.6b26b280a8a37661df64b9f3e64f75d5.', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:09:26,305 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 6b26b280a8a37661df64b9f3e64f75d5 2024-12-02T21:09:26,305 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733173765944.6b26b280a8a37661df64b9f3e64f75d5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:09:26,305 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 6b26b280a8a37661df64b9f3e64f75d5 2024-12-02T21:09:26,305 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 6b26b280a8a37661df64b9f3e64f75d5 2024-12-02T21:09:26,308 INFO [StoreOpener-6b26b280a8a37661df64b9f3e64f75d5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 6b26b280a8a37661df64b9f3e64f75d5 2024-12-02T21:09:26,310 INFO [StoreOpener-6b26b280a8a37661df64b9f3e64f75d5-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6b26b280a8a37661df64b9f3e64f75d5 columnFamilyName info 2024-12-02T21:09:26,310 DEBUG [StoreOpener-6b26b280a8a37661df64b9f3e64f75d5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:09:26,311 INFO [StoreOpener-6b26b280a8a37661df64b9f3e64f75d5-1 {}] regionserver.HStore(327): Store=6b26b280a8a37661df64b9f3e64f75d5/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:09:26,312 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/default/TestLogRolling-testLogRollOnPipelineRestart/6b26b280a8a37661df64b9f3e64f75d5 2024-12-02T21:09:26,313 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, 
pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/default/TestLogRolling-testLogRollOnPipelineRestart/6b26b280a8a37661df64b9f3e64f75d5 2024-12-02T21:09:26,317 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 6b26b280a8a37661df64b9f3e64f75d5 2024-12-02T21:09:26,320 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/default/TestLogRolling-testLogRollOnPipelineRestart/6b26b280a8a37661df64b9f3e64f75d5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:09:26,321 INFO [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 6b26b280a8a37661df64b9f3e64f75d5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=756543, jitterRate=-0.03800635039806366}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T21:09:26,322 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 6b26b280a8a37661df64b9f3e64f75d5: 2024-12-02T21:09:26,323 INFO [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1733173765944.6b26b280a8a37661df64b9f3e64f75d5., pid=11, masterSystemTime=1733173766298 2024-12-02T21:09:26,325 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1733173765944.6b26b280a8a37661df64b9f3e64f75d5. 2024-12-02T21:09:26,325 INFO [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRollOnPipelineRestart,,1733173765944.6b26b280a8a37661df64b9f3e64f75d5. 
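Note: across this log the desiredMaxFileSize values at region open (821188 for meta, 764620 for namespace, 756543 here) all match the configured hbase.hregion.max.filesize of 786432 adjusted by the logged jitterRate, i.e. desired = max + (long)(max * jitterRate). A small check of that relationship against the numbers above; the formula is my reading of ConstantSizeRegionSplitPolicy's jitter handling, not something stated in the log.

    public class SplitJitterSketch {
      public static void main(String[] args) {
        long maxFileSize = 786432L; // from the MAX_FILESIZE "(786432) is too small" warning earlier
        double[] jitterRates = { 0.044195279479026794, -0.027736514806747437, -0.03800635039806366 };
        for (double jitterRate : jitterRates) {
          long desired = maxFileSize + (long) (maxFileSize * jitterRate);
          System.out.println(desired); // prints 821188, 764620, 756543 -- the three values logged above
        }
      }
    }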
2024-12-02T21:09:26,326 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=6b26b280a8a37661df64b9f3e64f75d5, regionState=OPEN, openSeqNum=2, regionLocation=7d4f3b9a7081,46151,1733173764195 2024-12-02T21:09:26,330 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-02T21:09:26,330 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 6b26b280a8a37661df64b9f3e64f75d5, server=7d4f3b9a7081,46151,1733173764195 in 185 msec 2024-12-02T21:09:26,332 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-02T21:09:26,332 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=6b26b280a8a37661df64b9f3e64f75d5, ASSIGN in 344 msec 2024-12-02T21:09:26,332 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T21:09:26,333 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733173766332"}]},"ts":"1733173766332"} 2024-12-02T21:09:26,334 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-12-02T21:09:26,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:26,404 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T21:09:26,409 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 461 msec 2024-12-02T21:09:27,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:28,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:29,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:29,512 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-02T21:09:29,530 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:09:29,530 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:09:29,531 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:09:29,531 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:09:29,531 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:09:29,531 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:09:29,533 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:09:29,535 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:09:30,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:30,667 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-12-02T21:09:31,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:09:32,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:33,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:34,008 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-02T21:09:34,008 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-12-02T21:09:34,011 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-02T21:09:34,011 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-02T21:09:34,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:35,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:35,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34867 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-02T21:09:35,952 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart, procId: 9 completed 2024-12-02T21:09:35,958 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-12-02T21:09:35,958 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1733173765944.6b26b280a8a37661df64b9f3e64f75d5. 
2024-12-02T21:09:36,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:37,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:37,966 INFO [Time-limited test {}] wal.TestLogRolling(337): log.getCurrentFileName()): hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173764805 2024-12-02T21:09:37,967 WARN [ResponseProcessor for block BP-1649102533-172.17.0.2-1733173762666:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1649102533-172.17.0.2-1733173762666:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:09:37,968 WARN [ResponseProcessor for block BP-1649102533-172.17.0.2-1733173762666:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1649102533-172.17.0.2-1733173762666:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1649102533-172.17.0.2-1733173762666:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:46749,DS-a09a5dec-f5bf-45ab-bc95-4ef2c766b66a,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:09:37,968 WARN [ResponseProcessor for block BP-1649102533-172.17.0.2-1733173762666:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1649102533-172.17.0.2-1733173762666:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1649102533-172.17.0.2-1733173762666:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:46749,DS-a09a5dec-f5bf-45ab-bc95-4ef2c766b66a,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:09:37,969 WARN [DataStreamer for file /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/WALs/7d4f3b9a7081,34867,1733173764056/7d4f3b9a7081%2C34867%2C1733173764056.1733173764288 block BP-1649102533-172.17.0.2-1733173762666:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1649102533-172.17.0.2-1733173762666:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44711,DS-020c589a-a637-4a0b-91c5-4976ea7c1c25,DISK], DatanodeInfoWithStorage[127.0.0.1:46749,DS-a09a5dec-f5bf-45ab-bc95-4ef2c766b66a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46749,DS-a09a5dec-f5bf-45ab-bc95-4ef2c766b66a,DISK]) is bad. 2024-12-02T21:09:37,969 WARN [DataStreamer for file /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173764805 block BP-1649102533-172.17.0.2-1733173762666:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1649102533-172.17.0.2-1733173762666:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46749,DS-a09a5dec-f5bf-45ab-bc95-4ef2c766b66a,DISK], DatanodeInfoWithStorage[127.0.0.1:44711,DS-020c589a-a637-4a0b-91c5-4976ea7c1c25,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46749,DS-a09a5dec-f5bf-45ab-bc95-4ef2c766b66a,DISK]) is bad. 2024-12-02T21:09:37,970 WARN [DataStreamer for file /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.meta.1733173765199.meta block BP-1649102533-172.17.0.2-1733173762666:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1649102533-172.17.0.2-1733173762666:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44711,DS-020c589a-a637-4a0b-91c5-4976ea7c1c25,DISK], DatanodeInfoWithStorage[127.0.0.1:46749,DS-a09a5dec-f5bf-45ab-bc95-4ef2c766b66a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46749,DS-a09a5dec-f5bf-45ab-bc95-4ef2c766b66a,DISK]) is bad. 2024-12-02T21:09:37,969 WARN [PacketResponder: BP-1649102533-172.17.0.2-1733173762666:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:46749] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:09:37,969 WARN [PacketResponder: BP-1649102533-172.17.0.2-1733173762666:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:46749] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:09:37,970 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1910428619_22 at /127.0.0.1:34896 [Receiving block BP-1649102533-172.17.0.2-1733173762666:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44711:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34896 dst: /127.0.0.1:44711 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:09:37,971 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-81234619_22 at /127.0.0.1:34868 [Receiving block BP-1649102533-172.17.0.2-1733173762666:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44711:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34868 dst: /127.0.0.1:44711 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:09:37,971 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1910428619_22 at /127.0.0.1:49084 [Receiving block BP-1649102533-172.17.0.2-1733173762666:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46749:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49084 dst: /127.0.0.1:46749 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:09:37,971 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1910428619_22 at /127.0.0.1:34892 [Receiving block BP-1649102533-172.17.0.2-1733173762666:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44711:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34892 dst: /127.0.0.1:44711 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:09:37,971 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-81234619_22 at /127.0.0.1:49052 [Receiving block BP-1649102533-172.17.0.2-1733173762666:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46749:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49052 dst: /127.0.0.1:46749 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:09:37,972 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1910428619_22 at /127.0.0.1:49092 [Receiving block BP-1649102533-172.17.0.2-1733173762666:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46749:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49092 dst: /127.0.0.1:46749 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:09:37,976 WARN [BP-1649102533-172.17.0.2-1733173762666 heartbeating to localhost/127.0.0.1:35937 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1649102533-172.17.0.2-1733173762666 (Datanode Uuid 71ceb9c5-600b-4a8b-97c6-ef649952eebf) service to localhost/127.0.0.1:35937 2024-12-02T21:09:37,989 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/cluster_a36dc0da-24c9-4660-2142-0a0442b4566b/dfs/data/data3/current/BP-1649102533-172.17.0.2-1733173762666 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:09:37,989 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/cluster_a36dc0da-24c9-4660-2142-0a0442b4566b/dfs/data/data4/current/BP-1649102533-172.17.0.2-1733173762666 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:09:37,990 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5c0f1ef4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:09:37,990 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3a1e6fd8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:09:37,990 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:09:37,990 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@580d9f1c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:09:37,991 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@26fd3d4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/hadoop.log.dir/,STOPPED} 2024-12-02T21:09:37,992 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:09:38,000 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:09:38,003 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:09:38,007 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:09:38,007 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:09:38,007 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T21:09:38,007 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@73270302{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:09:38,008 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6af63917{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:09:38,097 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2d80f62b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/java.io.tmpdir/jetty-localhost-46125-hadoop-hdfs-3_4_1-tests_jar-_-any-6067153755588039255/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:09:38,097 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@249e2570{HTTP/1.1, (http/1.1)}{localhost:46125} 2024-12-02T21:09:38,097 INFO [Time-limited test {}] server.Server(415): Started @189477ms 2024-12-02T21:09:38,098 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:09:38,112 WARN [ResponseProcessor for block BP-1649102533-172.17.0.2-1733173762666:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1649102533-172.17.0.2-1733173762666:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:09:38,112 WARN [ResponseProcessor for block BP-1649102533-172.17.0.2-1733173762666:blk_1073741833_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1649102533-172.17.0.2-1733173762666:blk_1073741833_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:09:38,112 WARN [ResponseProcessor for block BP-1649102533-172.17.0.2-1733173762666:blk_1073741834_1017 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1649102533-172.17.0.2-1733173762666:blk_1073741834_1017 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:09:38,112 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-81234619_22 at /127.0.0.1:55522 [Receiving block BP-1649102533-172.17.0.2-1733173762666:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44711:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55522 dst: /127.0.0.1:44711 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T21:09:38,112 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1910428619_22 at /127.0.0.1:55534 [Receiving block BP-1649102533-172.17.0.2-1733173762666:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44711:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55534 dst: /127.0.0.1:44711 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:09:38,112 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1910428619_22 at /127.0.0.1:55536 [Receiving block BP-1649102533-172.17.0.2-1733173762666:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44711:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55536 dst: /127.0.0.1:44711 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:09:38,114 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7516fc94{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:09:38,114 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1df7a86c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:09:38,114 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:09:38,114 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e9bbc9d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:09:38,114 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d7ae91e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/hadoop.log.dir/,STOPPED} 2024-12-02T21:09:38,115 WARN [BP-1649102533-172.17.0.2-1733173762666 heartbeating to localhost/127.0.0.1:35937 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:09:38,115 WARN [BP-1649102533-172.17.0.2-1733173762666 heartbeating to localhost/127.0.0.1:35937 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1649102533-172.17.0.2-1733173762666 (Datanode Uuid 6d3579fa-125d-47cd-85cc-8fd66e40db23) service to localhost/127.0.0.1:35937 2024-12-02T21:09:38,115 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T21:09:38,115 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:09:38,115 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/cluster_a36dc0da-24c9-4660-2142-0a0442b4566b/dfs/data/data1/current/BP-1649102533-172.17.0.2-1733173762666 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:09:38,116 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/cluster_a36dc0da-24c9-4660-2142-0a0442b4566b/dfs/data/data2/current/BP-1649102533-172.17.0.2-1733173762666 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:09:38,116 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:09:38,121 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:09:38,124 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:09:38,125 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:09:38,125 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:09:38,125 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T21:09:38,125 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6ae49f61{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:09:38,126 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30e9ee1c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:09:38,214 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@725fbe92{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/java.io.tmpdir/jetty-localhost-40139-hadoop-hdfs-3_4_1-tests_jar-_-any-3999209225484591736/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:09:38,214 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4a2886b7{HTTP/1.1, (http/1.1)}{localhost:40139} 2024-12-02T21:09:38,214 INFO [Time-limited test {}] server.Server(415): Started @189594ms 2024-12-02T21:09:38,215 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
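The Jetty teardown at 21:09:38,114 and the fresh ServerConnector on localhost:40139 at 21:09:38,214 are one datanode of the mini cluster being bounced by the test; it comes back on different ephemeral ports, which is why the old pipeline member 127.0.0.1:44711 stays bad in the errors that follow. A minimal sketch of such a bounce against the public MiniDFSCluster API is below; it is illustrative only, the `bounceDataNode` helper name is invented, and it is not the actual TestLogRolling code.

```java
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Hypothetical helper: stop and restart one datanode of a MiniDFSCluster
// (for example the cluster behind an HBase test utility) while a WAL
// writer still holds an open pipeline to it.
public class DataNodeBounceSketch {
  static void bounceDataNode(MiniDFSCluster dfsCluster) throws Exception {
    // Stop datanode 0 but keep its storage directories and registration info.
    MiniDFSCluster.DataNodeProperties dnProps = dfsCluster.stopDataNode(0);

    // ... the test exercises the open WAL against the degraded pipeline here ...

    // Bring the same datanode back; it may bind new ephemeral ports, as the
    // changing Jetty/IPC ports in the log above show.
    dfsCluster.restartDataNode(dnProps);
    dfsCluster.waitActive();
  }
}
```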
2024-12-02T21:09:38,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:38,422 WARN [Thread-1086 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T21:09:38,424 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x90e72b5906fb10b4 with lease ID 0x7c8b4e6cc8a01645: from storage DS-a09a5dec-f5bf-45ab-bc95-4ef2c766b66a node DatanodeRegistration(127.0.0.1:32843, datanodeUuid=71ceb9c5-600b-4a8b-97c6-ef649952eebf, infoPort=46423, infoSecurePort=0, ipcPort=41585, storageInfo=lv=-57;cid=testClusterID;nsid=630606353;c=1733173762666), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:09:38,424 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x90e72b5906fb10b4 with lease ID 0x7c8b4e6cc8a01645: from storage DS-0cc2c10b-820b-4fc4-8adb-2ef253f1b358 node DatanodeRegistration(127.0.0.1:32843, datanodeUuid=71ceb9c5-600b-4a8b-97c6-ef649952eebf, infoPort=46423, infoSecurePort=0, ipcPort=41585, storageInfo=lv=-57;cid=testClusterID;nsid=630606353;c=1733173762666), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:09:38,542 WARN [Thread-1106 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T21:09:38,544 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x75ce22072adaa401 with lease ID 0x7c8b4e6cc8a01646: from storage DS-020c589a-a637-4a0b-91c5-4976ea7c1c25 node DatanodeRegistration(127.0.0.1:44371, datanodeUuid=6d3579fa-125d-47cd-85cc-8fd66e40db23, infoPort=34125, infoSecurePort=0, ipcPort=34999, storageInfo=lv=-57;cid=testClusterID;nsid=630606353;c=1733173762666), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:09:38,544 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x75ce22072adaa401 with lease ID 0x7c8b4e6cc8a01646: from storage DS-c87b65bb-8a6c-4814-9307-d7ebddd821fe node DatanodeRegistration(127.0.0.1:44371, datanodeUuid=6d3579fa-125d-47cd-85cc-8fd66e40db23, infoPort=34125, infoSecurePort=0, ipcPort=34999, storageInfo=lv=-57;cid=testClusterID;nsid=630606353;c=1733173762666), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:09:39,231 INFO [Time-limited test {}] wal.TestLogRolling(349): Data Nodes restarted 2024-12-02T21:09:39,234 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-12-02T21:09:39,235 WARN [RS:0;7d4f3b9a7081:46151.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=5, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44711,DS-020c589a-a637-4a0b-91c5-4976ea7c1c25,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
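The "All datanodes [...] are bad. Aborting..." failure above comes out of DataStreamer's pipeline recovery (the setupPipelineForAppendOrRecovery frames): the open WAL's pipeline contained only the restarted datanode's old address, 127.0.0.1:44711, so once that member is excluded there is nothing left to fail over to and the append aborts. As a hedged illustration, not this test's configuration, these are the stock HDFS client properties that steer that recovery decision; they control whether a replacement datanode is requested when a pipeline member fails, and cannot help once every member has been marked bad.

```java
import org.apache.hadoop.conf.Configuration;

// Illustrative client-side settings for HDFS write-pipeline recovery.
public class PipelineRecoveryKnobs {
  static Configuration clientConf() {
    Configuration conf = new Configuration();
    // Consider replacing a failed datanode in the write pipeline at all.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // DEFAULT / ALWAYS / NEVER: when a replacement should be requested.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // Keep writing on a failed replacement instead of failing the stream.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}
```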
2024-12-02T21:09:39,236 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 7d4f3b9a7081%2C46151%2C1733173764195:(num 1733173764805) roll requested 2024-12-02T21:09:39,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=5, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44711,DS-020c589a-a637-4a0b-91c5-4976ea7c1c25,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
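The DamagedWALException frames above show the shape of FSHLog's append path: mutations are published onto an LMAX Disruptor ring buffer and consumed by a single handler thread (FSHLog$RingBufferEventHandler), so a dead datanode pipeline surfaces as an exception from that handler plus a WAL-roll request, which the RPC layer then reports back to the caller. Below is a minimal, self-contained sketch of that producer/consumer pattern with disruptor 3.4.4 (the version in the trace); the event and handler names are invented for illustration and this is not HBase code.

```java
import com.lmax.disruptor.EventHandler;
import com.lmax.disruptor.dsl.Disruptor;
import com.lmax.disruptor.util.DaemonThreadFactory;

// Invented names; a sketch of the ring-buffer append pattern, not HBase code.
public class WalRingBufferSketch {
  static final class AppendEvent {
    long sequenceId;
    byte[] payload;
  }

  public static void main(String[] args) {
    Disruptor<AppendEvent> disruptor =
        new Disruptor<>(AppendEvent::new, 1024, DaemonThreadFactory.INSTANCE);

    // Single consumer thread, analogous to FSHLog$RingBufferEventHandler.onEvent:
    // if the underlying writer is broken it throws, and the caller reacts by
    // requesting a roll onto a fresh WAL file.
    EventHandler<AppendEvent> handler = (event, sequence, endOfBatch) -> {
      if (event.payload == null) {
        throw new IllegalStateException(
            "Append sequenceId=" + event.sequenceId + ", requesting roll of WAL");
      }
      // ... hand event.payload to the WAL writer here ...
    };
    disruptor.handleEventsWith(handler);
    disruptor.start();

    // Producer side: publish one append onto the ring buffer.
    disruptor.publishEvent((event, seq) -> {
      event.sequenceId = seq;
      event.payload = new byte[] {1, 2, 3};
    });

    disruptor.shutdown();
  }
}
```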
2024-12-02T21:09:39,236 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C46151%2C1733173764195.1733173779236 2024-12-02T21:09:39,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:33838 deadline: 1733173789235, exception=org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=5, requesting roll of WAL 2024-12-02T21:09:39,241 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173764805 newFile=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173779236 2024-12-02T21:09:39,242 WARN [regionserver/7d4f3b9a7081:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=5, requesting roll of WAL 2024-12-02T21:09:39,242 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173764805 with entries=5, filesize=2.09 KB; new WAL /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173779236 2024-12-02T21:09:39,242 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46423:46423),(127.0.0.1/127.0.0.1:34125:34125)] 2024-12-02T21:09:39,242 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173764805 is not closed yet, will try archiving it next time 2024-12-02T21:09:39,242 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44711,DS-020c589a-a637-4a0b-91c5-4976ea7c1c25,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:09:39,242 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44711,DS-020c589a-a637-4a0b-91c5-4976ea7c1c25,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:09:39,242 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173764805 2024-12-02T21:09:39,243 WARN [IPC Server handler 1 on default port 35937 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173764805 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1016 2024-12-02T21:09:39,243 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173764805 after 1ms 2024-12-02T21:09:39,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:40,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:41,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:41,425 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1016: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-02T21:09:42,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:43,244 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173764805 after 4002ms 2024-12-02T21:09:43,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:44,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:45,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:46,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:09:47,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:48,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:49,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:50,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:51,268 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-12-02T21:09:51,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:52,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:53,272 WARN [ResponseProcessor for block BP-1649102533-172.17.0.2-1733173762666:blk_1073741839_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1649102533-172.17.0.2-1733173762666:blk_1073741839_1018 java.io.IOException: Bad response ERROR for BP-1649102533-172.17.0.2-1733173762666:blk_1073741839_1018 from datanode DatanodeInfoWithStorage[127.0.0.1:44371,DS-020c589a-a637-4a0b-91c5-4976ea7c1c25,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
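Every "Failed invocation ... InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" warning in the run above is the same doomed probe: RecoverLeaseFSUtils keeps polling isFileClosed on the old meta WAL under hdfs://localhost:40413 about once per second, but the FileSystem instance behind that path has already been shut down, so the reflective call can never succeed. By contrast, the lease on the WAL under port 35937 is recovered normally at 21:09:43,244 ("Recovered lease, attempt=1 ... after 4002ms"). The recover-and-poll pattern those frames correspond to looks roughly like the sketch below, written against the public DistributedFileSystem API; recoverLease and isFileClosed are real HDFS methods, while the loop shape and sleep interval are illustrative rather than HBase's exact logic.

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Illustrative recover-and-poll loop; not RecoverLeaseFSUtils' actual code.
public class LeaseRecoverySketch {
  static void recoverLease(DistributedFileSystem dfs, Path wal) throws Exception {
    // Ask the NameNode to start lease recovery; true means the file is
    // already closed and the lease is fully released.
    boolean recovered = dfs.recoverLease(wal);
    while (!recovered) {
      Thread.sleep(1000L); // pause between attempts, matching the ~1s cadence above
      // isFileClosed is the call failing above with "Filesystem closed": if dfs
      // itself has been closed, this probe throws instead of ever returning
      // true, and the retries keep failing identically.
      recovered = dfs.isFileClosed(wal) || dfs.recoverLease(wal);
    }
  }
}
```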
2024-12-02T21:09:53,272 WARN [DataStreamer for file /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173779236 block BP-1649102533-172.17.0.2-1733173762666:blk_1073741839_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1649102533-172.17.0.2-1733173762666:blk_1073741839_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32843,DS-a09a5dec-f5bf-45ab-bc95-4ef2c766b66a,DISK], DatanodeInfoWithStorage[127.0.0.1:44371,DS-020c589a-a637-4a0b-91c5-4976ea7c1c25,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44371,DS-020c589a-a637-4a0b-91c5-4976ea7c1c25,DISK]) is bad. 2024-12-02T21:09:53,272 WARN [PacketResponder: BP-1649102533-172.17.0.2-1733173762666:blk_1073741839_1018, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:44371] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:09:53,273 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1910428619_22 at /127.0.0.1:45912 [Receiving block BP-1649102533-172.17.0.2-1733173762666:blk_1073741839_1018] {}] datanode.DataXceiver(331): 127.0.0.1:32843:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45912 dst: /127.0.0.1:32843 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:09:53,274 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1910428619_22 at /127.0.0.1:36728 [Receiving block BP-1649102533-172.17.0.2-1733173762666:blk_1073741839_1018] {}] datanode.DataXceiver(331): 127.0.0.1:44371:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36728 dst: /127.0.0.1:44371 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T21:09:53,325 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@725fbe92{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:09:53,325 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4a2886b7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:09:53,325 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:09:53,326 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30e9ee1c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:09:53,326 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6ae49f61{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/hadoop.log.dir/,STOPPED} 2024-12-02T21:09:53,329 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-02T21:09:53,329 WARN [BP-1649102533-172.17.0.2-1733173762666 heartbeating to localhost/127.0.0.1:35937 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:09:53,329 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:09:53,329 WARN [BP-1649102533-172.17.0.2-1733173762666 heartbeating to localhost/127.0.0.1:35937 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1649102533-172.17.0.2-1733173762666 (Datanode Uuid 6d3579fa-125d-47cd-85cc-8fd66e40db23) service to localhost/127.0.0.1:35937 2024-12-02T21:09:53,331 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/cluster_a36dc0da-24c9-4660-2142-0a0442b4566b/dfs/data/data1/current/BP-1649102533-172.17.0.2-1733173762666 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:09:53,331 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/cluster_a36dc0da-24c9-4660-2142-0a0442b4566b/dfs/data/data2/current/BP-1649102533-172.17.0.2-1733173762666 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:09:53,331 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:09:53,342 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:09:53,346 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:09:53,346 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:09:53,346 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:09:53,347 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T21:09:53,347 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2806229b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:09:53,348 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25d63f3b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:09:53,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:09:53,443 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@69ce67fa{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/java.io.tmpdir/jetty-localhost-43657-hadoop-hdfs-3_4_1-tests_jar-_-any-7829369514003973523/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:09:53,443 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6400f8b0{HTTP/1.1, (http/1.1)}{localhost:43657} 2024-12-02T21:09:53,443 INFO [Time-limited test {}] server.Server(415): Started @204823ms 2024-12-02T21:09:53,444 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:09:53,457 WARN [ResponseProcessor for block BP-1649102533-172.17.0.2-1733173762666:blk_1073741839_1020 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1649102533-172.17.0.2-1733173762666:blk_1073741839_1020 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:09:53,458 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1910428619_22 at /127.0.0.1:41304 [Receiving block BP-1649102533-172.17.0.2-1733173762666:blk_1073741839_1018] {}] datanode.DataXceiver(331): 127.0.0.1:32843:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41304 dst: /127.0.0.1:32843 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:09:53,459 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2d80f62b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:09:53,459 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@249e2570{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:09:53,460 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:09:53,460 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6af63917{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:09:53,460 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@73270302{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/hadoop.log.dir/,STOPPED} 2024-12-02T21:09:53,461 WARN [BP-1649102533-172.17.0.2-1733173762666 heartbeating to localhost/127.0.0.1:35937 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:09:53,461 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T21:09:53,461 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:09:53,461 WARN [BP-1649102533-172.17.0.2-1733173762666 heartbeating to localhost/127.0.0.1:35937 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1649102533-172.17.0.2-1733173762666 (Datanode Uuid 71ceb9c5-600b-4a8b-97c6-ef649952eebf) service to localhost/127.0.0.1:35937 2024-12-02T21:09:53,462 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/cluster_a36dc0da-24c9-4660-2142-0a0442b4566b/dfs/data/data3/current/BP-1649102533-172.17.0.2-1733173762666 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:09:53,462 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/cluster_a36dc0da-24c9-4660-2142-0a0442b4566b/dfs/data/data4/current/BP-1649102533-172.17.0.2-1733173762666 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:09:53,462 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:09:53,469 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:09:53,473 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:09:53,474 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:09:53,474 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:09:53,474 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T21:09:53,475 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@324e724b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:09:53,475 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74a29034{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:09:53,567 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@e284d80{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/java.io.tmpdir/jetty-localhost-41579-hadoop-hdfs-3_4_1-tests_jar-_-any-2773227262685153118/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:09:53,567 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@15df44c3{HTTP/1.1, 
(http/1.1)}{localhost:41579} 2024-12-02T21:09:53,568 INFO [Time-limited test {}] server.Server(415): Started @204947ms 2024-12-02T21:09:53,568 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:09:53,735 WARN [Thread-1161 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T21:09:53,737 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x45775e198348d91c with lease ID 0x7c8b4e6cc8a01647: from storage DS-020c589a-a637-4a0b-91c5-4976ea7c1c25 node DatanodeRegistration(127.0.0.1:46101, datanodeUuid=6d3579fa-125d-47cd-85cc-8fd66e40db23, infoPort=40457, infoSecurePort=0, ipcPort=39167, storageInfo=lv=-57;cid=testClusterID;nsid=630606353;c=1733173762666), blocks: 8, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:09:53,737 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x45775e198348d91c with lease ID 0x7c8b4e6cc8a01647: from storage DS-c87b65bb-8a6c-4814-9307-d7ebddd821fe node DatanodeRegistration(127.0.0.1:46101, datanodeUuid=6d3579fa-125d-47cd-85cc-8fd66e40db23, infoPort=40457, infoSecurePort=0, ipcPort=39167, storageInfo=lv=-57;cid=testClusterID;nsid=630606353;c=1733173762666), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-02T21:09:53,913 WARN [Thread-1181 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T21:09:53,915 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7664db9555c5bdf1 with lease ID 0x7c8b4e6cc8a01648: from storage DS-a09a5dec-f5bf-45ab-bc95-4ef2c766b66a node DatanodeRegistration(127.0.0.1:42423, datanodeUuid=71ceb9c5-600b-4a8b-97c6-ef649952eebf, infoPort=38379, infoSecurePort=0, ipcPort=37159, storageInfo=lv=-57;cid=testClusterID;nsid=630606353;c=1733173762666), blocks: 8, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:09:53,915 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7664db9555c5bdf1 with lease ID 0x7c8b4e6cc8a01648: from storage DS-0cc2c10b-820b-4fc4-8adb-2ef253f1b358 node DatanodeRegistration(127.0.0.1:42423, datanodeUuid=71ceb9c5-600b-4a8b-97c6-ef649952eebf, infoPort=38379, infoSecurePort=0, ipcPort=37159, storageInfo=lv=-57;cid=testClusterID;nsid=630606353;c=1733173762666), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:09:54,035 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T21:09:54,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:54,541 WARN [master/7d4f3b9a7081:0:becomeActiveMaster.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=95, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44711,DS-020c589a-a637-4a0b-91c5-4976ea7c1c25,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:09:54,541 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(197): WAL FSHLog 7d4f3b9a7081%2C34867%2C1733173764056:(num 1733173764288) roll requested 2024-12-02T21:09:54,541 ERROR [ProcExecTimeout {}] region.RegionProcedureStore(422): Failed to delete pids=[4, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44711,DS-020c589a-a637-4a0b-91c5-4976ea7c1c25,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:09:54,542 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C34867%2C1733173764056.1733173794541 2024-12-02T21:09:54,542 ERROR [ProcExecTimeout {}] procedure2.TimeoutExecutorThread(124): Ignoring pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner exception: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL java.io.UncheckedIOException: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL at org.apache.hadoop.hbase.procedure2.store.region.RegionProcedureStore.delete(RegionProcedureStore.java:423) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner.periodicExecute(CompletedProcedureCleaner.java:135) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.executeInMemoryChore(TimeoutExecutorThread.java:122) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.execDelayedProcedure(TimeoutExecutorThread.java:101) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.run(TimeoutExecutorThread.java:68) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] Caused by: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44711,DS-020c589a-a637-4a0b-91c5-4976ea7c1c25,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:09:54,552 WARN [master:store-WAL-Roller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL 2024-12-02T21:09:54,552 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/WALs/7d4f3b9a7081,34867,1733173764056/7d4f3b9a7081%2C34867%2C1733173764056.1733173764288 with entries=92, filesize=45.98 KB; new WAL /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/WALs/7d4f3b9a7081,34867,1733173764056/7d4f3b9a7081%2C34867%2C1733173764056.1733173794541 2024-12-02T21:09:54,553 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38379:38379),(127.0.0.1/127.0.0.1:40457:40457)] 2024-12-02T21:09:54,553 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(751): hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/WALs/7d4f3b9a7081,34867,1733173764056/7d4f3b9a7081%2C34867%2C1733173764056.1733173764288 is not closed yet, will try archiving it next time 2024-12-02T21:09:54,553 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44711,DS-020c589a-a637-4a0b-91c5-4976ea7c1c25,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:09:54,553 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44711,DS-020c589a-a637-4a0b-91c5-4976ea7c1c25,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:09:54,553 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/WALs/7d4f3b9a7081,34867,1733173764056/7d4f3b9a7081%2C34867%2C1733173764056.1733173764288 2024-12-02T21:09:54,553 WARN [IPC Server handler 1 on default port 35937 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/WALs/7d4f3b9a7081,34867,1733173764056/7d4f3b9a7081%2C34867%2C1733173764056.1733173764288 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741830_1015 2024-12-02T21:09:54,554 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/WALs/7d4f3b9a7081,34867,1733173764056/7d4f3b9a7081%2C34867%2C1733173764056.1733173764288 after 1ms 2024-12-02T21:09:54,584 INFO [Time-limited test {}] wal.TestLogRolling(366): Data Nodes restarted 2024-12-02T21:09:54,586 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-12-02T21:09:54,587 WARN [RS:0;7d4f3b9a7081:46151.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=8, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32843,DS-a09a5dec-f5bf-45ab-bc95-4ef2c766b66a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:09:54,587 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 7d4f3b9a7081%2C46151%2C1733173764195:(num 1733173779236) roll requested 2024-12-02T21:09:54,587 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C46151%2C1733173764195.1733173794587 2024-12-02T21:09:54,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=8, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32843,DS-a09a5dec-f5bf-45ab-bc95-4ef2c766b66a,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:09:54,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:33838 deadline: 1733173804586, exception=org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=8, requesting roll of WAL 2024-12-02T21:09:54,595 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173779236 newFile=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173794587 2024-12-02T21:09:54,595 WARN [regionserver/7d4f3b9a7081:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=8, requesting roll of WAL 2024-12-02T21:09:54,595 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173779236 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173794587 2024-12-02T21:09:54,595 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38379:38379),(127.0.0.1/127.0.0.1:40457:40457)] 2024-12-02T21:09:54,595 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173779236 is not closed yet, will try archiving it next time 2024-12-02T21:09:54,595 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32843,DS-a09a5dec-f5bf-45ab-bc95-4ef2c766b66a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:09:54,595 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32843,DS-a09a5dec-f5bf-45ab-bc95-4ef2c766b66a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:09:54,596 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173779236 2024-12-02T21:09:54,596 WARN [IPC Server handler 1 on default port 35937 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173779236 has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741839_1020 2024-12-02T21:09:54,596 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173779236 after 0ms 2024-12-02T21:09:55,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:56,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:56,741 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741839_1020: GenerationStamp not matched, existing replica is blk_1073741839_1018 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-02T21:09:57,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:58,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:58,555 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/WALs/7d4f3b9a7081,34867,1733173764056/7d4f3b9a7081%2C34867%2C1733173764056.1733173764288 after 4002ms 2024-12-02T21:09:58,598 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173779236 after 4002ms 2024-12-02T21:09:58,646 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T21:09:58,649 INFO [RS-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40430, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T21:09:59,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:09:59,915 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 2) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-02T21:10:00,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:01,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:10:02,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:03,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:04,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:05,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:06,120 INFO [master/7d4f3b9a7081:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-02T21:10:06,120 INFO [master/7d4f3b9a7081:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-02T21:10:06,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-02T21:10:06,687 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C46151%2C1733173764195.1733173806687
2024-12-02T21:10:06,699 DEBUG [Time-limited test {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173794587 newFile=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173806687
2024-12-02T21:10:06,700 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173794587 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173806687
2024-12-02T21:10:06,700 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38379:38379),(127.0.0.1/127.0.0.1:40457:40457)]
2024-12-02T21:10:06,700 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173794587 is not closed yet, will try archiving it next time
2024-12-02T21:10:06,701 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173764805
2024-12-02T21:10:06,701 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173764805
2024-12-02T21:10:06,701 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173764805 after 0ms
2024-12-02T21:10:06,701 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173764805
2024-12-02T21:10:06,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46101 is added to blk_1073741841_1023 (size=1264)
2024-12-02T21:10:06,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42423 is added to blk_1073741841_1023 (size=1264)
2024-12-02T21:10:06,710 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733173765676/Put/vlen=162/seqid=0]
2024-12-02T21:10:06,710 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #4: [default/info:d/1733173765722/Put/vlen=9/seqid=0]
2024-12-02T21:10:06,710 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #5: [hbase/info:d/1733173765754/Put/vlen=7/seqid=0]
2024-12-02T21:10:06,710 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733173766322/Put/vlen=218/seqid=0]
2024-12-02T21:10:06,710 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #4: [row1002/info:/1733173775963/Put/vlen=1045/seqid=0]
2024-12-02T21:10:06,710 DEBUG [Time-limited test {}] wal.TestLogRolling(396): EOF reading file /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173764805
2024-12-02T21:10:06,710 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173779236
2024-12-02T21:10:06,710 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173779236
2024-12-02T21:10:06,711 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173779236 after 1ms
2024-12-02T21:10:06,711 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173779236
2024-12-02T21:10:06,714 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #6: [row1003/info:/1733173789266/Put/vlen=1045/seqid=0]
2024-12-02T21:10:06,714 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #7: [row1004/info:/1733173791269/Put/vlen=1045/seqid=0]
2024-12-02T21:10:06,714 DEBUG [Time-limited test {}] wal.TestLogRolling(396): EOF reading file /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173779236
2024-12-02T21:10:06,714 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173794587
2024-12-02T21:10:06,714 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173794587
2024-12-02T21:10:06,715 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173794587 after 1ms
2024-12-02T21:10:06,715 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173794587
2024-12-02T21:10:06,718 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #9: [row1005/info:/1733173804685/Put/vlen=1045/seqid=0]
2024-12-02T21:10:06,718 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173806687
2024-12-02T21:10:06,718 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173806687
2024-12-02T21:10:06,718 WARN [IPC Server handler 3 on default port 35937 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173806687 has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741842_1025
2024-12-02T21:10:06,718 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173806687 after 0ms
2024-12-02T21:10:06,918 WARN [ResponseProcessor for block BP-1649102533-172.17.0.2-1733173762666:blk_1073741842_1025 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1649102533-172.17.0.2-1733173762666:blk_1073741842_1025
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-02T21:10:06,918 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-81234619_22 at /127.0.0.1:42796 [Receiving block BP-1649102533-172.17.0.2-1733173762666:blk_1073741842_1025] {}] datanode.DataXceiver(331): 127.0.0.1:42423:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42796 dst: /127.0.0.1:42423
java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:42423 remote=/127.0.0.1:42796]. Total timeout mills is 60000, 59781 millis timeout left.
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:10:06,918 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-81234619_22 at /127.0.0.1:38702 [Receiving block BP-1649102533-172.17.0.2-1733173762666:blk_1073741842_1025] {}] datanode.DataXceiver(331): 127.0.0.1:46101:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38702 dst: /127.0.0.1:46101 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T21:10:06,919 WARN [DataStreamer for file /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173806687 block BP-1649102533-172.17.0.2-1733173762666:blk_1073741842_1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1649102533-172.17.0.2-1733173762666:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42423,DS-a09a5dec-f5bf-45ab-bc95-4ef2c766b66a,DISK], DatanodeInfoWithStorage[127.0.0.1:46101,DS-020c589a-a637-4a0b-91c5-4976ea7c1c25,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42423,DS-a09a5dec-f5bf-45ab-bc95-4ef2c766b66a,DISK]) is bad. 2024-12-02T21:10:06,926 WARN [DataStreamer for file /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173806687 block BP-1649102533-172.17.0.2-1733173762666:blk_1073741842_1025 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1649102533-172.17.0.2-1733173762666:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:10:06,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42423 is added to blk_1073741842_1026 (size=85) 2024-12-02T21:10:06,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46101 is added to blk_1073741842_1026 (size=85) 2024-12-02T21:10:07,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:08,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:09,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:10,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-02T21:10:10,720 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173806687 after 4002ms
2024-12-02T21:10:10,720 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173806687
2024-12-02T21:10:10,731 DEBUG [Time-limited test {}] wal.TestLogRolling(396): EOF reading file /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173806687
2024-12-02T21:10:10,731 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 16c341a4ab260c1acb120ac1a558413b 1/1 column families, dataSize=78 B heapSize=488 B
2024-12-02T21:10:10,731 WARN [RS:0;7d4f3b9a7081:46151.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=7, requesting roll of WAL
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1649102533-172.17.0.2-1733173762666:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-02T21:10:10,732 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 7d4f3b9a7081%2C46151%2C1733173764195:(num 1733173806687) roll requested
2024-12-02T21:10:10,732 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 16c341a4ab260c1acb120ac1a558413b:
2024-12-02T21:10:10,732 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C46151%2C1733173764195.1733173810732
2024-12-02T21:10:10,732 INFO [Time-limited test {}] wal.TestLogRolling(416): org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?]
    at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?]
    at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1649102533-172.17.0.2-1733173762666:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:10:10,733 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.90 KB heapSize=5.42 KB 2024-12-02T21:10:10,733 WARN [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=15, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44711,DS-020c589a-a637-4a0b-91c5-4976ea7c1c25,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:10:10,733 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 1588230740: 2024-12-02T21:10:10,733 INFO [Time-limited test {}] wal.TestLogRolling(416): org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44711,DS-020c589a-a637-4a0b-91c5-4976ea7c1c25,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:10:10,733 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 6b26b280a8a37661df64b9f3e64f75d5 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-02T21:10:10,734 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 6b26b280a8a37661df64b9f3e64f75d5: 2024-12-02T21:10:10,734 INFO [Time-limited test {}] wal.TestLogRolling(416): org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1649102533-172.17.0.2-1733173762666:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:10:10,736 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-02T21:10:10,736 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-02T21:10:10,736 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0592832b to 127.0.0.1:57382 2024-12-02T21:10:10,736 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:10:10,736 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-02T21:10:10,737 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=881623050, stopped=false 2024-12-02T21:10:10,737 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=7d4f3b9a7081,34867,1733173764056 2024-12-02T21:10:10,739 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173806687 newFile=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173810732 2024-12-02T21:10:10,739 WARN [regionserver/7d4f3b9a7081:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL 2024-12-02T21:10:10,739 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173806687 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173810732 2024-12-02T21:10:10,739 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38379:38379),(127.0.0.1/127.0.0.1:40457:40457)] 2024-12-02T21:10:10,739 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173806687 is not closed yet, will try archiving it next time 2024-12-02T21:10:10,739 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 7d4f3b9a7081%2C46151%2C1733173764195.meta:.meta(num 1733173765199) roll requested 2024-12-02T21:10:10,739 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C46151%2C1733173764195.meta.1733173810739.meta 2024-12-02T21:10:10,739 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1649102533-172.17.0.2-1733173762666:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:10:10,739 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1649102533-172.17.0.2-1733173762666:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T21:10:10,740 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173806687 2024-12-02T21:10:10,740 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173806687 after 0ms 2024-12-02T21:10:10,741 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.1733173806687 to hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/oldWALs/7d4f3b9a7081%2C46151%2C1733173764195.1733173806687 2024-12-02T21:10:10,744 WARN [regionserver/7d4f3b9a7081:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL 2024-12-02T21:10:10,745 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.meta.1733173765199.meta with entries=11, filesize=3.66 KB; new WAL /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.meta.1733173810739.meta 2024-12-02T21:10:10,745 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38379:38379),(127.0.0.1/127.0.0.1:40457:40457)] 2024-12-02T21:10:10,745 DEBUG [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.meta.1733173765199.meta is not closed yet, will try archiving it next time 2024-12-02T21:10:10,745 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44711,DS-020c589a-a637-4a0b-91c5-4976ea7c1c25,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:10:10,745 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44711,DS-020c589a-a637-4a0b-91c5-4976ea7c1c25,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T21:10:10,745 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.meta.1733173765199.meta 2024-12-02T21:10:10,746 WARN [IPC Server handler 3 on default port 35937 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.meta.1733173765199.meta has not been closed. Lease recovery is in progress. RecoveryId = 1029 for block blk_1073741834_1017 2024-12-02T21:10:10,746 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.meta.1733173765199.meta after 1ms 2024-12-02T21:10:10,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46151-0x101992a823a0001, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T21:10:10,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T21:10:10,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46151-0x101992a823a0001, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:10:10,795 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-02T21:10:10,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:10:10,795 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:10:10,795 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7d4f3b9a7081,46151,1733173764195' ***** 2024-12-02T21:10:10,796 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-02T21:10:10,796 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46151-0x101992a823a0001, quorum=127.0.0.1:57382, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:10:10,796 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:10:10,796 INFO [RS:0;7d4f3b9a7081:46151 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T21:10:10,797 INFO 
[RS:0;7d4f3b9a7081:46151 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T21:10:10,797 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-02T21:10:10,797 INFO [RS:0;7d4f3b9a7081:46151 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T21:10:10,797 INFO [RS:0;7d4f3b9a7081:46151 {}] regionserver.HRegionServer(3579): Received CLOSE for 16c341a4ab260c1acb120ac1a558413b 2024-12-02T21:10:10,798 INFO [RS:0;7d4f3b9a7081:46151 {}] regionserver.HRegionServer(3579): Received CLOSE for 6b26b280a8a37661df64b9f3e64f75d5 2024-12-02T21:10:10,798 INFO [RS:0;7d4f3b9a7081:46151 {}] regionserver.HRegionServer(1224): stopping server 7d4f3b9a7081,46151,1733173764195 2024-12-02T21:10:10,798 DEBUG [RS:0;7d4f3b9a7081:46151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:10:10,798 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 16c341a4ab260c1acb120ac1a558413b, disabling compactions & flushes 2024-12-02T21:10:10,798 INFO [RS:0;7d4f3b9a7081:46151 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T21:10:10,798 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733173765303.16c341a4ab260c1acb120ac1a558413b. 2024-12-02T21:10:10,798 INFO [RS:0;7d4f3b9a7081:46151 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T21:10:10,798 INFO [RS:0;7d4f3b9a7081:46151 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-02T21:10:10,798 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733173765303.16c341a4ab260c1acb120ac1a558413b. 2024-12-02T21:10:10,798 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733173765303.16c341a4ab260c1acb120ac1a558413b. after waiting 0 ms 2024-12-02T21:10:10,798 INFO [RS:0;7d4f3b9a7081:46151 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-02T21:10:10,798 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733173765303.16c341a4ab260c1acb120ac1a558413b. 2024-12-02T21:10:10,799 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 16c341a4ab260c1acb120ac1a558413b 1/1 column families, dataSize=78 B heapSize=728 B 2024-12-02T21:10:10,799 INFO [RS:0;7d4f3b9a7081:46151 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close 2024-12-02T21:10:10,799 DEBUG [RS:0;7d4f3b9a7081:46151 {}] regionserver.HRegionServer(1603): Online Regions={16c341a4ab260c1acb120ac1a558413b=hbase:namespace,,1733173765303.16c341a4ab260c1acb120ac1a558413b., 1588230740=hbase:meta,,1.1588230740, 6b26b280a8a37661df64b9f3e64f75d5=TestLogRolling-testLogRollOnPipelineRestart,,1733173765944.6b26b280a8a37661df64b9f3e64f75d5.} 2024-12-02T21:10:10,799 WARN [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultMemStore(92): Snapshot called again without clearing previous. Doing nothing. 
Another ongoing flush or did we fail last attempt? 2024-12-02T21:10:10,799 DEBUG [RS:0;7d4f3b9a7081:46151 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 16c341a4ab260c1acb120ac1a558413b, 6b26b280a8a37661df64b9f3e64f75d5 2024-12-02T21:10:10,799 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-02T21:10:10,799 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-02T21:10:10,799 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-02T21:10:10,799 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T21:10:10,800 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T21:10:10,800 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.90 KB heapSize=5.89 KB 2024-12-02T21:10:10,800 WARN [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultMemStore(92): Snapshot called again without clearing previous. Doing nothing. Another ongoing flush or did we fail last attempt? 2024-12-02T21:10:10,800 WARN [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultMemStore(92): Snapshot called again without clearing previous. Doing nothing. Another ongoing flush or did we fail last attempt? 
2024-12-02T21:10:10,815 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/hbase/namespace/16c341a4ab260c1acb120ac1a558413b/.tmp/info/8ba4bb08e148485ea1781f530fb1ecff is 45, key is default/info:d/1733173765722/Put/seqid=0 2024-12-02T21:10:10,816 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/hbase/meta/1588230740/.tmp/info/602754bc1c5c4cae87abcf3ce1651c03 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1733173765944.6b26b280a8a37661df64b9f3e64f75d5./info:regioninfo/1733173766326/Put/seqid=0 2024-12-02T21:10:10,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42423 is added to blk_1073741845_1030 (size=5037) 2024-12-02T21:10:10,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46101 is added to blk_1073741845_1030 (size=5037) 2024-12-02T21:10:10,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42423 is added to blk_1073741846_1031 (size=8268) 2024-12-02T21:10:10,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46101 is added to blk_1073741846_1031 (size=8268) 2024-12-02T21:10:10,821 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.66 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/hbase/meta/1588230740/.tmp/info/602754bc1c5c4cae87abcf3ce1651c03 2024-12-02T21:10:10,821 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/hbase/namespace/16c341a4ab260c1acb120ac1a558413b/.tmp/info/8ba4bb08e148485ea1781f530fb1ecff 2024-12-02T21:10:10,827 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/hbase/namespace/16c341a4ab260c1acb120ac1a558413b/.tmp/info/8ba4bb08e148485ea1781f530fb1ecff as hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/hbase/namespace/16c341a4ab260c1acb120ac1a558413b/info/8ba4bb08e148485ea1781f530fb1ecff 2024-12-02T21:10:10,833 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/hbase/namespace/16c341a4ab260c1acb120ac1a558413b/info/8ba4bb08e148485ea1781f530fb1ecff, entries=2, sequenceid=8, filesize=4.9 K 2024-12-02T21:10:10,834 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 16c341a4ab260c1acb120ac1a558413b in 36ms, sequenceid=8, compaction requested=false 2024-12-02T21:10:10,839 DEBUG 
[RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/hbase/namespace/16c341a4ab260c1acb120ac1a558413b/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-12-02T21:10:10,840 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733173765303.16c341a4ab260c1acb120ac1a558413b. 2024-12-02T21:10:10,840 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 16c341a4ab260c1acb120ac1a558413b: 2024-12-02T21:10:10,840 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733173765303.16c341a4ab260c1acb120ac1a558413b. 2024-12-02T21:10:10,840 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 6b26b280a8a37661df64b9f3e64f75d5, disabling compactions & flushes 2024-12-02T21:10:10,840 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733173765944.6b26b280a8a37661df64b9f3e64f75d5. 2024-12-02T21:10:10,840 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733173765944.6b26b280a8a37661df64b9f3e64f75d5. 2024-12-02T21:10:10,840 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733173765944.6b26b280a8a37661df64b9f3e64f75d5. after waiting 0 ms 2024-12-02T21:10:10,840 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733173765944.6b26b280a8a37661df64b9f3e64f75d5. 2024-12-02T21:10:10,840 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 6b26b280a8a37661df64b9f3e64f75d5 1/1 column families, dataSize=4.20 KB heapSize=4.98 KB 2024-12-02T21:10:10,840 WARN [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultMemStore(92): Snapshot called again without clearing previous. Doing nothing. Another ongoing flush or did we fail last attempt? 
2024-12-02T21:10:10,843 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/hbase/meta/1588230740/.tmp/table/068a2561969847c2bdc0d810fcc97851 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1733173766332/Put/seqid=0 2024-12-02T21:10:10,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42423 is added to blk_1073741847_1032 (size=5482) 2024-12-02T21:10:10,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46101 is added to blk_1073741847_1032 (size=5482) 2024-12-02T21:10:10,861 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/default/TestLogRolling-testLogRollOnPipelineRestart/6b26b280a8a37661df64b9f3e64f75d5/.tmp/info/f3c965024e6944b493d5137084134f57 is 1080, key is row1002/info:/1733173775963/Put/seqid=0 2024-12-02T21:10:10,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46101 is added to blk_1073741848_1033 (size=9270) 2024-12-02T21:10:10,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42423 is added to blk_1073741848_1033 (size=9270) 2024-12-02T21:10:10,868 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/default/TestLogRolling-testLogRollOnPipelineRestart/6b26b280a8a37661df64b9f3e64f75d5/.tmp/info/f3c965024e6944b493d5137084134f57 2024-12-02T21:10:10,875 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/default/TestLogRolling-testLogRollOnPipelineRestart/6b26b280a8a37661df64b9f3e64f75d5/.tmp/info/f3c965024e6944b493d5137084134f57 as hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/default/TestLogRolling-testLogRollOnPipelineRestart/6b26b280a8a37661df64b9f3e64f75d5/info/f3c965024e6944b493d5137084134f57 2024-12-02T21:10:10,882 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/default/TestLogRolling-testLogRollOnPipelineRestart/6b26b280a8a37661df64b9f3e64f75d5/info/f3c965024e6944b493d5137084134f57, entries=4, sequenceid=12, filesize=9.1 K 2024-12-02T21:10:10,883 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 6b26b280a8a37661df64b9f3e64f75d5 in 43ms, sequenceid=12, compaction requested=false 2024-12-02T21:10:10,887 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/default/TestLogRolling-testLogRollOnPipelineRestart/6b26b280a8a37661df64b9f3e64f75d5/recovered.edits/15.seqid, newMaxSeqId=15, maxSeqId=1 2024-12-02T21:10:10,888 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733173765944.6b26b280a8a37661df64b9f3e64f75d5. 2024-12-02T21:10:10,888 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 6b26b280a8a37661df64b9f3e64f75d5: 2024-12-02T21:10:10,888 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733173765944.6b26b280a8a37661df64b9f3e64f75d5. 2024-12-02T21:10:10,999 DEBUG [RS:0;7d4f3b9a7081:46151 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-02T21:10:11,200 DEBUG [RS:0;7d4f3b9a7081:46151 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-02T21:10:11,253 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=244 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/hbase/meta/1588230740/.tmp/table/068a2561969847c2bdc0d810fcc97851 2024-12-02T21:10:11,269 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/hbase/meta/1588230740/.tmp/info/602754bc1c5c4cae87abcf3ce1651c03 as hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/hbase/meta/1588230740/info/602754bc1c5c4cae87abcf3ce1651c03 2024-12-02T21:10:11,276 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/hbase/meta/1588230740/info/602754bc1c5c4cae87abcf3ce1651c03, entries=20, sequenceid=16, filesize=8.1 K 2024-12-02T21:10:11,278 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/hbase/meta/1588230740/.tmp/table/068a2561969847c2bdc0d810fcc97851 as hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/hbase/meta/1588230740/table/068a2561969847c2bdc0d810fcc97851 2024-12-02T21:10:11,284 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/hbase/meta/1588230740/table/068a2561969847c2bdc0d810fcc97851, entries=4, sequenceid=16, filesize=5.4 K 2024-12-02T21:10:11,285 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~2.90 KB/2972, heapSize ~5.14 KB/5264, currentSize=0 B/0 for 1588230740 in 485ms, sequenceid=16, compaction requested=false 2024-12-02T21:10:11,289 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/data/hbase/meta/1588230740/recovered.edits/19.seqid, newMaxSeqId=19, maxSeqId=1 2024-12-02T21:10:11,290 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T21:10:11,290 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-02T21:10:11,290 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-02T21:10:11,290 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-02T21:10:11,400 INFO [RS:0;7d4f3b9a7081:46151 {}] regionserver.HRegionServer(1250): stopping server 7d4f3b9a7081,46151,1733173764195; all regions closed. 2024-12-02T21:10:11,401 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195 2024-12-02T21:10:11,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:10:11,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46101 is added to blk_1073741844_1028 (size=761) 2024-12-02T21:10:11,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42423 is added to blk_1073741844_1028 (size=761) 2024-12-02T21:10:11,673 INFO [regionserver/7d4f3b9a7081:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-02T21:10:11,673 INFO [regionserver/7d4f3b9a7081:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-02T21:10:12,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:12,676 INFO [regionserver/7d4f3b9a7081:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T21:10:13,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:14,008 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-02T21:10:14,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:14,746 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.meta.1733173765199.meta after 4001ms 2024-12-02T21:10:14,747 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195/7d4f3b9a7081%2C46151%2C1733173764195.meta.1733173765199.meta to hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/oldWALs/7d4f3b9a7081%2C46151%2C1733173764195.meta.1733173765199.meta 2024-12-02T21:10:14,750 DEBUG [RS:0;7d4f3b9a7081:46151 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/oldWALs 2024-12-02T21:10:14,750 INFO [RS:0;7d4f3b9a7081:46151 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 7d4f3b9a7081%2C46151%2C1733173764195.meta:.meta(num 1733173810739) 2024-12-02T21:10:14,750 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/WALs/7d4f3b9a7081,46151,1733173764195 2024-12-02T21:10:14,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46101 is added to blk_1073741843_1027 (size=1979) 2024-12-02T21:10:14,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42423 is added to blk_1073741843_1027 (size=1979) 2024-12-02T21:10:14,758 DEBUG [RS:0;7d4f3b9a7081:46151 {}] wal.AbstractFSWAL(1071): Moved 4 WAL file(s) to /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/oldWALs 2024-12-02T21:10:14,758 INFO [RS:0;7d4f3b9a7081:46151 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 7d4f3b9a7081%2C46151%2C1733173764195:(num 1733173810732) 2024-12-02T21:10:14,758 DEBUG [RS:0;7d4f3b9a7081:46151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:10:14,758 INFO [RS:0;7d4f3b9a7081:46151 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T21:10:14,758 INFO [RS:0;7d4f3b9a7081:46151 {}] hbase.ChoreService(370): Chore service for: regionserver/7d4f3b9a7081:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-02T21:10:14,758 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
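[Editor's note] The WARN/INFO pairs above show the WAL close path repeatedly probing HDFS until the file lease is recovered (here "attempt=1 ... after 4001ms"). Below is a minimal, hedged sketch of that probe-and-retry pattern using only the public DistributedFileSystem API; it is not the actual RecoverLeaseFSUtils code, and the path, timeout, and pause values are assumptions for illustration.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoveryRetrySketch {
  /**
   * Repeatedly ask the NameNode to recover the lease on a WAL file and poll
   * isFileClosed() until the file is closed or the timeout expires.
   * Returns true if the lease was recovered within the timeout.
   */
  static boolean recoverLease(DistributedFileSystem dfs, Path wal,
                              long timeoutMs, long pauseMs)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    int attempt = 0;
    while (System.currentTimeMillis() < deadline) {
      attempt++;
      // recoverLease() returns true once the NameNode considers the file closed.
      if (dfs.recoverLease(wal)) {
        System.out.println("Recovered lease, attempt=" + attempt + " on file=" + wal);
        return true;
      }
      // Poll isFileClosed() between attempts; this is the call that fails with
      // "Filesystem closed" in the log once the DFSClient has been shut down.
      if (dfs.isFileClosed(wal)) {
        return true;
      }
      Thread.sleep(pauseMs);
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical WAL path; in the log this is a file under .../WALs/<server>/
    Path wal = new Path("hdfs://localhost:8020/hbase/WALs/example-wal");
    try (FileSystem fs = wal.getFileSystem(conf)) {
      if (fs instanceof DistributedFileSystem) {
        recoverLease((DistributedFileSystem) fs, wal, 60_000L, 1_000L);
      }
    }
  }
}
```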
2024-12-02T21:10:14,759 INFO [RS:0;7d4f3b9a7081:46151 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:46151 2024-12-02T21:10:14,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T21:10:14,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46151-0x101992a823a0001, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7d4f3b9a7081,46151,1733173764195 2024-12-02T21:10:14,778 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7d4f3b9a7081,46151,1733173764195] 2024-12-02T21:10:14,778 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 7d4f3b9a7081,46151,1733173764195; numProcessing=1 2024-12-02T21:10:14,786 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/7d4f3b9a7081,46151,1733173764195 already deleted, retry=false 2024-12-02T21:10:14,786 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 7d4f3b9a7081,46151,1733173764195 expired; onlineServers=0 2024-12-02T21:10:14,786 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7d4f3b9a7081,34867,1733173764056' ***** 2024-12-02T21:10:14,786 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-02T21:10:14,786 DEBUG [M:0;7d4f3b9a7081:34867 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b225d5f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7d4f3b9a7081/172.17.0.2:0 2024-12-02T21:10:14,786 INFO [M:0;7d4f3b9a7081:34867 {}] regionserver.HRegionServer(1224): stopping server 7d4f3b9a7081,34867,1733173764056 2024-12-02T21:10:14,786 INFO [M:0;7d4f3b9a7081:34867 {}] regionserver.HRegionServer(1250): stopping server 7d4f3b9a7081,34867,1733173764056; all regions closed. 2024-12-02T21:10:14,786 DEBUG [M:0;7d4f3b9a7081:34867 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:10:14,786 DEBUG [M:0;7d4f3b9a7081:34867 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-02T21:10:14,786 DEBUG [M:0;7d4f3b9a7081:34867 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-02T21:10:14,786 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
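[Editor's note] The ZKWatcher and RegionServerTracker entries above record the master observing that the region server's ephemeral znode under /hbase/rs was deleted and treating that as server expiration. A small, hedged sketch of that watch-for-deletion pattern with the plain ZooKeeper client API follows; the quorum address, znode path, and callback are made-up values, and this is not HBase's RegionServerTracker implementation.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    String quorum = "127.0.0.1:2181";                      // assumed quorum
    String rsZNode = "/hbase/rs/example-server,16020,1";   // assumed znode path
    CountDownLatch deleted = new CountDownLatch(1);

    ZooKeeper zk = new ZooKeeper(quorum, 30_000, event -> { /* connection events ignored */ });

    // Set a watch on the region server's ephemeral node; NodeDeleted fires
    // when the server's session ends and the node disappears.
    Watcher watcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && rsZNode.equals(event.getPath())) {
        System.out.println("Ephemeral node deleted, processing expiration " + event.getPath());
        deleted.countDown();
      }
    };
    if (zk.exists(rsZNode, watcher) == null) {
      // Mirrors the "already deleted, retry=false" case in the log.
      System.out.println("Node already gone, retry=false");
      deleted.countDown();
    }

    deleted.await();   // block until the deletion (or absence) is observed
    zk.close();
  }
}
```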
2024-12-02T21:10:14,786 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.large.0-1733173764544 {}] cleaner.HFileCleaner(306): Exit Thread[master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.large.0-1733173764544,5,FailOnTimeoutGroup] 2024-12-02T21:10:14,786 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.small.0-1733173764544 {}] cleaner.HFileCleaner(306): Exit Thread[master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.small.0-1733173764544,5,FailOnTimeoutGroup] 2024-12-02T21:10:14,787 INFO [M:0;7d4f3b9a7081:34867 {}] hbase.ChoreService(370): Chore service for: master/7d4f3b9a7081:0 had [] on shutdown 2024-12-02T21:10:14,787 DEBUG [M:0;7d4f3b9a7081:34867 {}] master.HMaster(1733): Stopping service threads 2024-12-02T21:10:14,787 INFO [M:0;7d4f3b9a7081:34867 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-02T21:10:14,787 INFO [M:0;7d4f3b9a7081:34867 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-02T21:10:14,787 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-02T21:10:14,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-02T21:10:14,794 DEBUG [M:0;7d4f3b9a7081:34867 {}] zookeeper.ZKUtil(347): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-02T21:10:14,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:10:14,794 WARN [M:0;7d4f3b9a7081:34867 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-02T21:10:14,794 INFO [M:0;7d4f3b9a7081:34867 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-02T21:10:14,795 INFO [M:0;7d4f3b9a7081:34867 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-02T21:10:14,795 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T21:10:14,795 DEBUG [M:0;7d4f3b9a7081:34867 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T21:10:14,795 INFO [M:0;7d4f3b9a7081:34867 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:10:14,795 DEBUG [M:0;7d4f3b9a7081:34867 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:10:14,795 DEBUG [M:0;7d4f3b9a7081:34867 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T21:10:14,795 DEBUG [M:0;7d4f3b9a7081:34867 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
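[Editor's note] The "Closing ... disabling compactions & flushes", "Acquired close lock", and "Updates disabled" lines above describe a close-lock handshake: in-flight updates hold a shared lock, close acquires the exclusive side, and only then is the remaining in-memory data flushed. The sketch below is an illustrative version of that generic pattern with a ReentrantReadWriteLock, not the actual HRegion code.

```java
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class CloseLockSketch {
  private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
  private volatile boolean closed = false;

  /** A mutation takes the shared lock so it cannot race with close(). */
  public void put(String row, String value) {
    closeLock.readLock().lock();
    try {
      if (closed) {
        throw new IllegalStateException("region is closed");
      }
      // apply the edit to the in-memory store (omitted)
    } finally {
      closeLock.readLock().unlock();
    }
  }

  /** Close waits (without a time limit) for in-flight updates, then flushes. */
  public void close() {
    closeLock.writeLock().lock();     // "Waiting without time limit for close lock"
    try {
      closed = true;                  // "Updates disabled for region"
      flushRemainingMemstore();       // followed by the flush seen in the next log lines
    } finally {
      closeLock.writeLock().unlock();
    }
  }

  private void flushRemainingMemstore() {
    // write in-memory contents out as store files (omitted)
  }
}
```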
2024-12-02T21:10:14,795 INFO [M:0;7d4f3b9a7081:34867 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=40.08 KB heapSize=49.23 KB 2024-12-02T21:10:14,815 DEBUG [M:0;7d4f3b9a7081:34867 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/da4816a012b0492d8de84a683f326e05 is 82, key is hbase:meta,,1/info:regioninfo/1733173765221/Put/seqid=0 2024-12-02T21:10:14,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42423 is added to blk_1073741849_1034 (size=5672) 2024-12-02T21:10:14,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46101 is added to blk_1073741849_1034 (size=5672) 2024-12-02T21:10:14,821 INFO [M:0;7d4f3b9a7081:34867 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/da4816a012b0492d8de84a683f326e05 2024-12-02T21:10:14,842 DEBUG [M:0;7d4f3b9a7081:34867 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/80653410edb247cb87511e3d88bf7175 is 777, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1733173766407/Put/seqid=0 2024-12-02T21:10:14,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46101 is added to blk_1073741850_1035 (size=7468) 2024-12-02T21:10:14,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42423 is added to blk_1073741850_1035 (size=7468) 2024-12-02T21:10:14,848 INFO [M:0;7d4f3b9a7081:34867 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=39.48 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/80653410edb247cb87511e3d88bf7175 2024-12-02T21:10:14,868 DEBUG [M:0;7d4f3b9a7081:34867 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/704b3394190d40ec86d84ccf6d0c84c2 is 69, key is 7d4f3b9a7081,46151,1733173764195/rs:state/1733173764650/Put/seqid=0 2024-12-02T21:10:14,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42423 is added to blk_1073741851_1036 (size=5156) 2024-12-02T21:10:14,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46101 is added to blk_1073741851_1036 (size=5156) 2024-12-02T21:10:14,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46151-0x101992a823a0001, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:10:14,878 INFO [RS:0;7d4f3b9a7081:46151 {}] regionserver.HRegionServer(1307): Exiting; stopping=7d4f3b9a7081,46151,1733173764195; zookeeper connection closed. 
2024-12-02T21:10:14,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46151-0x101992a823a0001, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:10:14,878 INFO [M:0;7d4f3b9a7081:34867 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/704b3394190d40ec86d84ccf6d0c84c2 2024-12-02T21:10:14,878 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@83a0f75 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@83a0f75 2024-12-02T21:10:14,878 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-02T21:10:14,898 DEBUG [M:0;7d4f3b9a7081:34867 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f38463c21407479a9f37efb0e900f32d is 52, key is load_balancer_on/state:d/1733173765937/Put/seqid=0 2024-12-02T21:10:14,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42423 is added to blk_1073741852_1037 (size=5056) 2024-12-02T21:10:14,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46101 is added to blk_1073741852_1037 (size=5056) 2024-12-02T21:10:14,903 INFO [M:0;7d4f3b9a7081:34867 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f38463c21407479a9f37efb0e900f32d 2024-12-02T21:10:14,908 DEBUG [M:0;7d4f3b9a7081:34867 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/da4816a012b0492d8de84a683f326e05 as hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/da4816a012b0492d8de84a683f326e05 2024-12-02T21:10:14,914 INFO [M:0;7d4f3b9a7081:34867 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/da4816a012b0492d8de84a683f326e05, entries=8, sequenceid=96, filesize=5.5 K 2024-12-02T21:10:14,915 DEBUG [M:0;7d4f3b9a7081:34867 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/80653410edb247cb87511e3d88bf7175 as hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/80653410edb247cb87511e3d88bf7175 2024-12-02T21:10:14,917 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1017: 
GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-02T21:10:14,921 INFO [M:0;7d4f3b9a7081:34867 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/80653410edb247cb87511e3d88bf7175, entries=11, sequenceid=96, filesize=7.3 K 2024-12-02T21:10:14,922 DEBUG [M:0;7d4f3b9a7081:34867 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/704b3394190d40ec86d84ccf6d0c84c2 as hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/704b3394190d40ec86d84ccf6d0c84c2 2024-12-02T21:10:14,927 INFO [M:0;7d4f3b9a7081:34867 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/704b3394190d40ec86d84ccf6d0c84c2, entries=1, sequenceid=96, filesize=5.0 K 2024-12-02T21:10:14,928 DEBUG [M:0;7d4f3b9a7081:34867 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f38463c21407479a9f37efb0e900f32d as hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f38463c21407479a9f37efb0e900f32d 2024-12-02T21:10:14,933 INFO [M:0;7d4f3b9a7081:34867 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35937/user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f38463c21407479a9f37efb0e900f32d, entries=1, sequenceid=96, filesize=4.9 K 2024-12-02T21:10:14,934 INFO [M:0;7d4f3b9a7081:34867 {}] regionserver.HRegion(3040): Finished flush of dataSize ~40.08 KB/41040, heapSize ~49.16 KB/50344, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 139ms, sequenceid=96, compaction requested=false 2024-12-02T21:10:14,935 INFO [M:0;7d4f3b9a7081:34867 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
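[Editor's note] The "Committing .tmp/... as ..." and "Added ..., entries=..., filesize=..." pairs above show the two-step flush commit: each store file is first written under the region's .tmp directory and then moved into the column-family directory with a filesystem rename, so readers only ever see complete files. Below is a hedged sketch of that write-then-rename pattern using the generic Hadoop FileSystem API; the paths and content are invented, and this is not the HRegionFileSystem implementation.

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenRenameCommitSketch {
  /**
   * Write the data to <regionDir>/.tmp/<family>/<fileName> and, only once the
   * write has fully succeeded, rename it to <regionDir>/<family>/<fileName>.
   */
  static Path commitStoreFile(FileSystem fs, Path regionDir, String family,
                              String fileName, byte[] data) throws IOException {
    Path tmpFile = new Path(new Path(regionDir, ".tmp/" + family), fileName);
    Path finalFile = new Path(new Path(regionDir, family), fileName);

    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.write(data);                       // flush the in-memory contents
    }

    fs.mkdirs(finalFile.getParent());
    if (!fs.rename(tmpFile, finalFile)) {    // move the finished file into the store dir
      throw new IOException("Failed to commit " + tmpFile + " as " + finalFile);
    }
    return finalFile;
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);   // local FS keeps the example runnable
    Path regionDir = new Path("/tmp/commit-sketch/region");
    Path committed = commitStoreFile(fs, regionDir, "info", "example-hfile",
        "key=value".getBytes(StandardCharsets.UTF_8));
    System.out.println("Committed " + committed);
  }
}
```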
2024-12-02T21:10:14,935 DEBUG [M:0;7d4f3b9a7081:34867 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-02T21:10:14,935 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/cec094ad-8298-4c24-78f9-fb346a53367e/MasterData/WALs/7d4f3b9a7081,34867,1733173764056 2024-12-02T21:10:14,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46101 is added to blk_1073741840_1021 (size=757) 2024-12-02T21:10:14,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42423 is added to blk_1073741840_1021 (size=757) 2024-12-02T21:10:14,937 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-02T21:10:14,937 INFO [M:0;7d4f3b9a7081:34867 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-02T21:10:14,937 INFO [M:0;7d4f3b9a7081:34867 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:34867 2024-12-02T21:10:14,944 DEBUG [M:0;7d4f3b9a7081:34867 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/7d4f3b9a7081,34867,1733173764056 already deleted, retry=false 2024-12-02T21:10:15,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:10:15,053 INFO [M:0;7d4f3b9a7081:34867 {}] regionserver.HRegionServer(1307): Exiting; stopping=7d4f3b9a7081,34867,1733173764056; zookeeper connection closed. 2024-12-02T21:10:15,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34867-0x101992a823a0000, quorum=127.0.0.1:57382, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:10:15,056 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@e284d80{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:10:15,057 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@15df44c3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:10:15,057 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:10:15,057 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74a29034{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:10:15,057 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@324e724b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/hadoop.log.dir/,STOPPED} 2024-12-02T21:10:15,059 WARN [BP-1649102533-172.17.0.2-1733173762666 heartbeating to localhost/127.0.0.1:35937 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:10:15,059 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T21:10:15,059 WARN [BP-1649102533-172.17.0.2-1733173762666 heartbeating to localhost/127.0.0.1:35937 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1649102533-172.17.0.2-1733173762666 (Datanode Uuid 71ceb9c5-600b-4a8b-97c6-ef649952eebf) service to localhost/127.0.0.1:35937 2024-12-02T21:10:15,059 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:10:15,060 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/cluster_a36dc0da-24c9-4660-2142-0a0442b4566b/dfs/data/data3/current/BP-1649102533-172.17.0.2-1733173762666 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:10:15,060 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/cluster_a36dc0da-24c9-4660-2142-0a0442b4566b/dfs/data/data4/current/BP-1649102533-172.17.0.2-1733173762666 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:10:15,060 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:10:15,062 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@69ce67fa{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:10:15,063 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6400f8b0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:10:15,063 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:10:15,063 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25d63f3b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:10:15,063 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2806229b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/hadoop.log.dir/,STOPPED} 2024-12-02T21:10:15,065 WARN [BP-1649102533-172.17.0.2-1733173762666 heartbeating to localhost/127.0.0.1:35937 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:10:15,065 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T21:10:15,065 WARN [BP-1649102533-172.17.0.2-1733173762666 heartbeating to localhost/127.0.0.1:35937 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1649102533-172.17.0.2-1733173762666 (Datanode Uuid 6d3579fa-125d-47cd-85cc-8fd66e40db23) service to localhost/127.0.0.1:35937 2024-12-02T21:10:15,065 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:10:15,065 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/cluster_a36dc0da-24c9-4660-2142-0a0442b4566b/dfs/data/data1/current/BP-1649102533-172.17.0.2-1733173762666 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:10:15,066 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/cluster_a36dc0da-24c9-4660-2142-0a0442b4566b/dfs/data/data2/current/BP-1649102533-172.17.0.2-1733173762666 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:10:15,066 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:10:15,073 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@29709a78{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T21:10:15,073 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@26116fb5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:10:15,073 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:10:15,073 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@721f1cbb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:10:15,074 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@aa7e0a3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/hadoop.log.dir/,STOPPED} 2024-12-02T21:10:15,078 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-02T21:10:15,099 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-02T21:10:15,105 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=102 (was 88) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:35937 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (356647748) connection to localhost/127.0.0.1:35937 from 
jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35937 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-27-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (356647748) connection to localhost/127.0.0.1:35937 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-27-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35937 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-26-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35937 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-9-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (356647748) connection to localhost/127.0.0.1:35937 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-28-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35937 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-26-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-26-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-28-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-29-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-29-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-28-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Command processor java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Potentially hanging thread: nioEventLoopGroup-29-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-27-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=452 (was 424) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=54 (was 49) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7930 (was 6919) - AvailableMemoryMB LEAK? - 2024-12-02T21:10:15,134 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=102, OpenFileDescriptor=452, MaxFileDescriptor=1048576, SystemLoadAverage=54, ProcessCount=11, AvailableMemoryMB=7929 2024-12-02T21:10:15,135 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-02T21:10:15,135 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/hadoop.log.dir so I do NOT create it in target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681 2024-12-02T21:10:15,135 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6022e80c-ee5e-76b3-2ef8-b676205a65e1/hadoop.tmp.dir so I do NOT create it in target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681 2024-12-02T21:10:15,135 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/cluster_6c800ea8-c534-eaaf-9cff-a3cfa2451d6b, deleteOnExit=true 2024-12-02T21:10:15,135 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-02T21:10:15,135 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting 
test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/test.cache.data in system properties and HBase conf 2024-12-02T21:10:15,135 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/hadoop.tmp.dir in system properties and HBase conf 2024-12-02T21:10:15,135 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/hadoop.log.dir in system properties and HBase conf 2024-12-02T21:10:15,136 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-02T21:10:15,136 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-02T21:10:15,136 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-02T21:10:15,136 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-02T21:10:15,136 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-02T21:10:15,136 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-02T21:10:15,136 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-02T21:10:15,136 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T21:10:15,137 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-02T21:10:15,137 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-02T21:10:15,137 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T21:10:15,137 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T21:10:15,137 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-02T21:10:15,137 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/nfs.dump.dir in system properties and HBase conf 2024-12-02T21:10:15,137 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/java.io.tmpdir in system properties and HBase conf 2024-12-02T21:10:15,137 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T21:10:15,137 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-02T21:10:15,138 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-02T21:10:15,155 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T21:10:15,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:15,417 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:10:15,424 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:10:15,436 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:10:15,436 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:10:15,436 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T21:10:15,441 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:10:15,442 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40c6fefe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:10:15,442 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ac584cd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:10:15,543 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2c32686b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/java.io.tmpdir/jetty-localhost-34687-hadoop-hdfs-3_4_1-tests_jar-_-any-12935758892099294505/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T21:10:15,544 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@118f9356{HTTP/1.1, (http/1.1)}{localhost:34687} 2024-12-02T21:10:15,544 INFO [Time-limited test {}] server.Server(415): Started @226924ms 2024-12-02T21:10:15,561 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T21:10:15,747 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:10:15,751 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:10:15,759 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:10:15,759 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:10:15,759 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T21:10:15,760 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6186507e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:10:15,760 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32ab8a68{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:10:15,857 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6e7d6ea1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/java.io.tmpdir/jetty-localhost-33987-hadoop-hdfs-3_4_1-tests_jar-_-any-17259354209068031920/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:10:15,857 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1fd419fc{HTTP/1.1, (http/1.1)}{localhost:33987} 2024-12-02T21:10:15,857 INFO [Time-limited test {}] server.Server(415): Started @227237ms 2024-12-02T21:10:15,858 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:15,858 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:15,858 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:15,858 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:15,858 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:15,859 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:15,861 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:15,861 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:15,861 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:15,863 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:15,867 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:15,867 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:15,868 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:15,868 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:15,869 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:10:15,921 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-02T21:10:15,922 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:15,923 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:15,923 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:15,923 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:15,937 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:15,937 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:15,937 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:15,937 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:15,938 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:15,938 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:15,943 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:15,943 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:15,944 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:15,946 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:15,956 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:10:15,958 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:10:15,960 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:10:15,960 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:10:15,960 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T21:10:15,961 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@494f0f0c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:10:15,961 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7d251d77{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:10:16,079 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b409b3f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/java.io.tmpdir/jetty-localhost-37445-hadoop-hdfs-3_4_1-tests_jar-_-any-517915688120481174/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:10:16,079 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@546a7485{HTTP/1.1, (http/1.1)}{localhost:37445} 2024-12-02T21:10:16,079 INFO [Time-limited test {}] server.Server(415): Started @227459ms 
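The entries above trace hbase.HBaseTestingUtility bringing up the mini DFS datanodes and their embedded Jetty web UIs (plus the benign FsDatasetImpl metric-collection warnings that appear while the datanodes are still initializing). As a minimal, illustrative sketch only, the snippet below shows how a test typically starts and stops this environment with the public HBaseTestingUtility API; it is not the actual test class that produced this log.

```java
import org.apache.hadoop.hbase.HBaseTestingUtility;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    // HBaseTestingUtility owns the mini DFS, ZooKeeper and HBase daemons whose
    // startup is traced in the surrounding log lines.
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster(1); // one region server, as in this run
    try {
      // The rootdir is chosen by the utility, much like the test-data paths above.
      System.out.println("hbase.rootdir=" + util.getConfiguration().get("hbase.rootdir"));
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
```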
2024-12-02T21:10:16,081 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:10:16,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:16,420 WARN [Thread-1396 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/cluster_6c800ea8-c534-eaaf-9cff-a3cfa2451d6b/dfs/data/data1/current/BP-2001958662-172.17.0.2-1733173815169/current, will proceed with Du for space computation calculation, 2024-12-02T21:10:16,420 WARN [Thread-1397 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/cluster_6c800ea8-c534-eaaf-9cff-a3cfa2451d6b/dfs/data/data2/current/BP-2001958662-172.17.0.2-1733173815169/current, will proceed with Du for space computation calculation, 2024-12-02T21:10:16,447 WARN [Thread-1361 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T21:10:16,450 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8c1ab1c4bba9f9fd with lease ID 0x8b152e7f01ec30e: Processing first storage report for DS-5d4cc80e-fae6-424d-a166-1d50197ddc15 from datanode DatanodeRegistration(127.0.0.1:44765, datanodeUuid=e1c08ab6-f856-4cea-a27b-d21956f3ae93, infoPort=33051, infoSecurePort=0, ipcPort=38337, storageInfo=lv=-57;cid=testClusterID;nsid=1522726841;c=1733173815169) 2024-12-02T21:10:16,450 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8c1ab1c4bba9f9fd with lease ID 0x8b152e7f01ec30e: from storage DS-5d4cc80e-fae6-424d-a166-1d50197ddc15 node DatanodeRegistration(127.0.0.1:44765, datanodeUuid=e1c08ab6-f856-4cea-a27b-d21956f3ae93, infoPort=33051, infoSecurePort=0, ipcPort=38337, storageInfo=lv=-57;cid=testClusterID;nsid=1522726841;c=1733173815169), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:10:16,450 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8c1ab1c4bba9f9fd with lease ID 0x8b152e7f01ec30e: Processing first storage report for DS-cde0ac5a-c742-458e-a1c0-3f7a3a5ddb38 from datanode DatanodeRegistration(127.0.0.1:44765, datanodeUuid=e1c08ab6-f856-4cea-a27b-d21956f3ae93, infoPort=33051, infoSecurePort=0, ipcPort=38337, storageInfo=lv=-57;cid=testClusterID;nsid=1522726841;c=1733173815169) 2024-12-02T21:10:16,450 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8c1ab1c4bba9f9fd with lease ID 0x8b152e7f01ec30e: from storage DS-cde0ac5a-c742-458e-a1c0-3f7a3a5ddb38 node DatanodeRegistration(127.0.0.1:44765, datanodeUuid=e1c08ab6-f856-4cea-a27b-d21956f3ae93, infoPort=33051, infoSecurePort=0, ipcPort=38337, storageInfo=lv=-57;cid=testClusterID;nsid=1522726841;c=1733173815169), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:10:16,653 WARN [Thread-1410 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/cluster_6c800ea8-c534-eaaf-9cff-a3cfa2451d6b/dfs/data/data4/current/BP-2001958662-172.17.0.2-1733173815169/current, will proceed with Du for space computation calculation, 2024-12-02T21:10:16,653 WARN [Thread-1409 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/cluster_6c800ea8-c534-eaaf-9cff-a3cfa2451d6b/dfs/data/data3/current/BP-2001958662-172.17.0.2-1733173815169/current, will proceed with Du for space computation calculation, 2024-12-02T21:10:16,676 WARN [Thread-1386 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T21:10:16,678 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x587d85a72293f906 with lease ID 0x8b152e7f01ec30f: Processing first storage report for DS-6932215a-077c-4136-934a-078825baa9fd from datanode DatanodeRegistration(127.0.0.1:34831, datanodeUuid=69a2293b-2994-49b5-a07c-6712e747be0a, infoPort=46493, infoSecurePort=0, ipcPort=43415, storageInfo=lv=-57;cid=testClusterID;nsid=1522726841;c=1733173815169) 2024-12-02T21:10:16,678 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x587d85a72293f906 with lease ID 0x8b152e7f01ec30f: from storage DS-6932215a-077c-4136-934a-078825baa9fd node DatanodeRegistration(127.0.0.1:34831, datanodeUuid=69a2293b-2994-49b5-a07c-6712e747be0a, infoPort=46493, infoSecurePort=0, ipcPort=43415, storageInfo=lv=-57;cid=testClusterID;nsid=1522726841;c=1733173815169), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-02T21:10:16,679 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x587d85a72293f906 with lease ID 0x8b152e7f01ec30f: Processing first storage report for DS-42089452-b23a-458a-ad89-8478071f752c from datanode DatanodeRegistration(127.0.0.1:34831, datanodeUuid=69a2293b-2994-49b5-a07c-6712e747be0a, infoPort=46493, infoSecurePort=0, ipcPort=43415, storageInfo=lv=-57;cid=testClusterID;nsid=1522726841;c=1733173815169) 2024-12-02T21:10:16,679 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x587d85a72293f906 with lease ID 0x8b152e7f01ec30f: from storage DS-42089452-b23a-458a-ad89-8478071f752c node DatanodeRegistration(127.0.0.1:34831, datanodeUuid=69a2293b-2994-49b5-a07c-6712e747be0a, infoPort=46493, infoSecurePort=0, ipcPort=43415, storageInfo=lv=-57;cid=testClusterID;nsid=1522726841;c=1733173815169), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:10:16,715 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681 2024-12-02T21:10:16,717 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/cluster_6c800ea8-c534-eaaf-9cff-a3cfa2451d6b/zookeeper_0, clientPort=59576, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/cluster_6c800ea8-c534-eaaf-9cff-a3cfa2451d6b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/cluster_6c800ea8-c534-eaaf-9cff-a3cfa2451d6b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-02T21:10:16,718 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=59576 2024-12-02T21:10:16,719 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:10:16,720 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:10:16,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741825_1001 (size=7) 2024-12-02T21:10:16,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741825_1001 (size=7) 2024-12-02T21:10:16,734 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804 with version=8 2024-12-02T21:10:16,734 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/hbase-staging 2024-12-02T21:10:16,736 INFO [Time-limited test {}] client.ConnectionUtils(129): master/7d4f3b9a7081:0 server-side Connection retries=45 2024-12-02T21:10:16,736 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:10:16,736 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T21:10:16,736 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T21:10:16,736 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:10:16,736 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T21:10:16,736 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T21:10:16,737 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T21:10:16,738 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:44147 2024-12-02T21:10:16,738 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:10:16,740 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:10:16,741 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:44147 connecting to ZooKeeper ensemble=127.0.0.1:59576 2024-12-02T21:10:16,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:441470x0, 
quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T21:10:16,834 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44147-0x101992b50010000 connected 2024-12-02T21:10:17,019 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T21:10:17,020 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:10:17,021 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T21:10:17,021 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44147 2024-12-02T21:10:17,021 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44147 2024-12-02T21:10:17,026 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44147 2024-12-02T21:10:17,030 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44147 2024-12-02T21:10:17,031 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44147 2024-12-02T21:10:17,031 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804, hbase.cluster.distributed=false 2024-12-02T21:10:17,044 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/7d4f3b9a7081:0 server-side Connection retries=45 2024-12-02T21:10:17,044 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:10:17,044 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T21:10:17,044 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T21:10:17,044 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:10:17,044 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T21:10:17,044 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T21:10:17,045 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 
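The "Set watcher on znode that does not yet exist, /hbase/master" (and /hbase/running, /hbase/acl) lines above come from the master and region server registering ZooKeeper watches before those znodes have been created. The sketch below shows the same idea with the plain Apache ZooKeeper client, assuming the ensemble address and znode path copied from the log; it is illustrative only and not the HBase ZKWatcher/ZKUtil code that emitted these messages.

```java
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Ensemble address and base znode taken from the log above.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59576", 30000, event -> { });

    // exists() registers the watch even when the znode is absent, which is what
    // "Set watcher on znode that does not yet exist, /hbase/master" describes.
    Watcher masterWatcher = event -> {
      if (event.getType() == Watcher.Event.EventType.NodeCreated) {
        System.out.println("znode created: " + event.getPath());
      }
    };
    Stat stat = zk.exists("/hbase/master", masterWatcher);
    System.out.println("/hbase/master is currently " + (stat == null ? "absent" : "present"));

    zk.close();
  }
}
```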
2024-12-02T21:10:17,045 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:42581 2024-12-02T21:10:17,045 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T21:10:17,047 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T21:10:17,047 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:10:17,049 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:10:17,051 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:42581 connecting to ZooKeeper ensemble=127.0.0.1:59576 2024-12-02T21:10:17,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:425810x0, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T21:10:17,061 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:425810x0, quorum=127.0.0.1:59576, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T21:10:17,061 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42581-0x101992b50010001 connected 2024-12-02T21:10:17,062 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42581-0x101992b50010001, quorum=127.0.0.1:59576, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:10:17,062 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42581-0x101992b50010001, quorum=127.0.0.1:59576, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T21:10:17,062 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42581 2024-12-02T21:10:17,063 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42581 2024-12-02T21:10:17,063 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42581 2024-12-02T21:10:17,064 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42581 2024-12-02T21:10:17,064 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42581 2024-12-02T21:10:17,066 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/7d4f3b9a7081,44147,1733173816735 2024-12-02T21:10:17,076 DEBUG [M:0;7d4f3b9a7081:44147 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7d4f3b9a7081:44147 2024-12-02T21:10:17,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:10:17,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:42581-0x101992b50010001, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:10:17,077 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7d4f3b9a7081,44147,1733173816735 2024-12-02T21:10:17,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42581-0x101992b50010001, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T21:10:17,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T21:10:17,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:10:17,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42581-0x101992b50010001, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:10:17,086 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T21:10:17,086 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7d4f3b9a7081,44147,1733173816735 from backup master directory 2024-12-02T21:10:17,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42581-0x101992b50010001, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:10:17,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7d4f3b9a7081,44147,1733173816735 2024-12-02T21:10:17,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:10:17,094 WARN [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-02T21:10:17,094 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7d4f3b9a7081,44147,1733173816735 2024-12-02T21:10:17,094 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T21:10:17,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741826_1002 (size=42) 2024-12-02T21:10:17,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741826_1002 (size=42) 2024-12-02T21:10:17,106 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/hbase.id with ID: e16b76fd-1123-413b-bcf8-62aae384aed5 2024-12-02T21:10:17,119 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:10:17,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42581-0x101992b50010001, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:10:17,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:10:17,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741827_1003 (size=196) 2024-12-02T21:10:17,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741827_1003 (size=196) 2024-12-02T21:10:17,145 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T21:10:17,145 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-02T21:10:17,145 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:10:17,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741828_1004 (size=1189) 2024-12-02T21:10:17,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741828_1004 (size=1189) 2024-12-02T21:10:17,153 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/data/master/store 2024-12-02T21:10:17,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741829_1005 (size=34) 2024-12-02T21:10:17,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741829_1005 (size=34) 2024-12-02T21:10:17,160 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:10:17,160 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T21:10:17,160 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-02T21:10:17,160 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:10:17,160 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T21:10:17,160 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:10:17,160 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:10:17,160 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-02T21:10:17,161 WARN [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/data/master/store/.initializing 2024-12-02T21:10:17,161 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/WALs/7d4f3b9a7081,44147,1733173816735 2024-12-02T21:10:17,164 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7d4f3b9a7081%2C44147%2C1733173816735, suffix=, logDir=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/WALs/7d4f3b9a7081,44147,1733173816735, archiveDir=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/oldWALs, maxLogs=10 2024-12-02T21:10:17,165 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C44147%2C1733173816735.1733173817165 2024-12-02T21:10:17,170 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/WALs/7d4f3b9a7081,44147,1733173816735/7d4f3b9a7081%2C44147%2C1733173816735.1733173817165 2024-12-02T21:10:17,170 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33051:33051),(127.0.0.1/127.0.0.1:46493:46493)] 2024-12-02T21:10:17,170 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:10:17,170 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:10:17,170 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:10:17,170 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:10:17,172 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:10:17,173 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-02T21:10:17,173 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:10:17,174 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:10:17,174 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:10:17,175 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-02T21:10:17,175 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:10:17,175 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:10:17,176 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:10:17,177 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-02T21:10:17,177 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:10:17,177 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:10:17,177 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:10:17,179 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-02T21:10:17,179 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:10:17,179 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:10:17,180 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:10:17,180 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:10:17,182 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T21:10:17,183 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:10:17,185 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:10:17,186 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=821613, jitterRate=0.04473543167114258}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T21:10:17,186 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-02T21:10:17,187 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-02T21:10:17,190 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7da9272d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:10:17,191 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-02T21:10:17,191 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-02T21:10:17,191 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-02T21:10:17,191 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-02T21:10:17,192 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec 2024-12-02T21:10:17,192 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-12-02T21:10:17,192 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-02T21:10:17,195 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
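The master:store descriptor printed above lists per-family settings such as VERSIONS => '3', IN_MEMORY => 'true', BLOCKSIZE => '8192 B (8KB)', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1' and BLOOMFILTER => 'ROWCOL' for the info family. As a hedged illustration, the sketch below expresses equivalent settings with the public HBase 2.x client API; the table name demo:store is hypothetical, since master:store itself is an internal region created by the master rather than something client code would build.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static void main(String[] args) {
    // Mirrors the 'info' family settings printed in the log: 3 versions, in-memory,
    // 8 KB blocks, ROW_INDEX_V1 encoding, ROWCOL bloom filter.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBlocksize(8192)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .build();

    // "demo:store" is a hypothetical name used only for this sketch.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo", "store"))
        .setColumnFamily(info)
        .build();

    System.out.println(td);
  }
}
```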
2024-12-02T21:10:17,196 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-02T21:10:17,202 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-02T21:10:17,203 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-02T21:10:17,203 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-02T21:10:17,211 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-02T21:10:17,211 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-02T21:10:17,212 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-02T21:10:17,219 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-02T21:10:17,220 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-02T21:10:17,227 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-02T21:10:17,229 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-02T21:10:17,236 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-02T21:10:17,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42581-0x101992b50010001, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T21:10:17,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T21:10:17,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42581-0x101992b50010001, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:10:17,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-02T21:10:17,244 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=7d4f3b9a7081,44147,1733173816735, sessionid=0x101992b50010000, setting cluster-up flag (Was=false) 2024-12-02T21:10:17,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42581-0x101992b50010001, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:10:17,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:10:17,286 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-02T21:10:17,286 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7d4f3b9a7081,44147,1733173816735 2024-12-02T21:10:17,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42581-0x101992b50010001, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:10:17,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:10:17,327 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-02T21:10:17,328 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7d4f3b9a7081,44147,1733173816735 2024-12-02T21:10:17,331 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-02T21:10:17,332 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-02T21:10:17,332 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-02T21:10:17,332 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7d4f3b9a7081,44147,1733173816735 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-02T21:10:17,332 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7d4f3b9a7081:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:10:17,332 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7d4f3b9a7081:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:10:17,332 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:10:17,332 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:10:17,332 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7d4f3b9a7081:0, corePoolSize=10, maxPoolSize=10 2024-12-02T21:10:17,333 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:10:17,333 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=2, maxPoolSize=2 2024-12-02T21:10:17,333 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:10:17,333 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733173847333 2024-12-02T21:10:17,334 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-02T21:10:17,334 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-02T21:10:17,334 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-02T21:10:17,334 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-02T21:10:17,334 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-02T21:10:17,334 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-02T21:10:17,334 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore 
name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T21:10:17,334 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:10:17,334 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-02T21:10:17,334 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-02T21:10:17,334 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-02T21:10:17,334 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-02T21:10:17,335 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-02T21:10:17,335 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-02T21:10:17,336 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:10:17,336 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T21:10:17,338 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.large.0-1733173817335,5,FailOnTimeoutGroup] 2024-12-02T21:10:17,341 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.small.0-1733173817338,5,FailOnTimeoutGroup] 2024-12-02T21:10:17,341 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-02T21:10:17,341 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-02T21:10:17,341 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-02T21:10:17,341 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-02T21:10:17,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741831_1007 (size=1039) 2024-12-02T21:10:17,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741831_1007 (size=1039) 2024-12-02T21:10:17,377 DEBUG [RS:0;7d4f3b9a7081:42581 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7d4f3b9a7081:42581 2024-12-02T21:10:17,378 INFO [RS:0;7d4f3b9a7081:42581 {}] regionserver.HRegionServer(1008): ClusterId : e16b76fd-1123-413b-bcf8-62aae384aed5 2024-12-02T21:10:17,378 DEBUG [RS:0;7d4f3b9a7081:42581 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T21:10:17,386 DEBUG [RS:0;7d4f3b9a7081:42581 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T21:10:17,386 DEBUG [RS:0;7d4f3b9a7081:42581 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T21:10:17,395 DEBUG [RS:0;7d4f3b9a7081:42581 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T21:10:17,395 DEBUG [RS:0;7d4f3b9a7081:42581 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14d9d3a0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:10:17,395 DEBUG [RS:0;7d4f3b9a7081:42581 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5adcfa07, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7d4f3b9a7081/172.17.0.2:0 2024-12-02T21:10:17,396 INFO [RS:0;7d4f3b9a7081:42581 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-02T21:10:17,396 INFO [RS:0;7d4f3b9a7081:42581 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-02T21:10:17,396 DEBUG [RS:0;7d4f3b9a7081:42581 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-02T21:10:17,396 INFO [RS:0;7d4f3b9a7081:42581 {}] regionserver.HRegionServer(3073): reportForDuty to master=7d4f3b9a7081,44147,1733173816735 with isa=7d4f3b9a7081/172.17.0.2:42581, startcode=1733173817044 2024-12-02T21:10:17,397 DEBUG [RS:0;7d4f3b9a7081:42581 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T21:10:17,401 INFO [RS-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40327, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T21:10:17,402 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44147 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 7d4f3b9a7081,42581,1733173817044 2024-12-02T21:10:17,402 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44147 {}] master.ServerManager(486): Registering regionserver=7d4f3b9a7081,42581,1733173817044 2024-12-02T21:10:17,403 DEBUG [RS:0;7d4f3b9a7081:42581 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804 2024-12-02T21:10:17,403 DEBUG [RS:0;7d4f3b9a7081:42581 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:43731 2024-12-02T21:10:17,403 DEBUG [RS:0;7d4f3b9a7081:42581 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-02T21:10:17,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:17,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T21:10:17,411 DEBUG [RS:0;7d4f3b9a7081:42581 {}] zookeeper.ZKUtil(111): regionserver:42581-0x101992b50010001, quorum=127.0.0.1:59576, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7d4f3b9a7081,42581,1733173817044 2024-12-02T21:10:17,411 WARN [RS:0;7d4f3b9a7081:42581 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T21:10:17,411 INFO [RS:0;7d4f3b9a7081:42581 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:10:17,411 DEBUG [RS:0;7d4f3b9a7081:42581 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/WALs/7d4f3b9a7081,42581,1733173817044 2024-12-02T21:10:17,411 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7d4f3b9a7081,42581,1733173817044] 2024-12-02T21:10:17,414 DEBUG [RS:0;7d4f3b9a7081:42581 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-02T21:10:17,414 INFO [RS:0;7d4f3b9a7081:42581 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T21:10:17,416 INFO [RS:0;7d4f3b9a7081:42581 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T21:10:17,417 INFO [RS:0;7d4f3b9a7081:42581 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T21:10:17,417 INFO [RS:0;7d4f3b9a7081:42581 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:10:17,419 INFO [RS:0;7d4f3b9a7081:42581 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-02T21:10:17,419 INFO [RS:0;7d4f3b9a7081:42581 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-02T21:10:17,420 DEBUG [RS:0;7d4f3b9a7081:42581 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:10:17,420 DEBUG [RS:0;7d4f3b9a7081:42581 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:10:17,420 DEBUG [RS:0;7d4f3b9a7081:42581 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:10:17,420 DEBUG [RS:0;7d4f3b9a7081:42581 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:10:17,420 DEBUG [RS:0;7d4f3b9a7081:42581 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:10:17,420 DEBUG [RS:0;7d4f3b9a7081:42581 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7d4f3b9a7081:0, corePoolSize=2, maxPoolSize=2 2024-12-02T21:10:17,420 DEBUG [RS:0;7d4f3b9a7081:42581 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:10:17,420 DEBUG [RS:0;7d4f3b9a7081:42581 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:10:17,420 DEBUG [RS:0;7d4f3b9a7081:42581 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:10:17,420 DEBUG [RS:0;7d4f3b9a7081:42581 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:10:17,420 DEBUG [RS:0;7d4f3b9a7081:42581 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:10:17,420 DEBUG [RS:0;7d4f3b9a7081:42581 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7d4f3b9a7081:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:10:17,420 DEBUG [RS:0;7d4f3b9a7081:42581 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:10:17,423 INFO [RS:0;7d4f3b9a7081:42581 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T21:10:17,423 INFO [RS:0;7d4f3b9a7081:42581 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T21:10:17,423 INFO [RS:0;7d4f3b9a7081:42581 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T21:10:17,423 INFO [RS:0;7d4f3b9a7081:42581 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T21:10:17,423 INFO [RS:0;7d4f3b9a7081:42581 {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,42581,1733173817044-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-02T21:10:17,436 INFO [RS:0;7d4f3b9a7081:42581 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T21:10:17,436 INFO [RS:0;7d4f3b9a7081:42581 {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,42581,1733173817044-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:10:17,455 INFO [RS:0;7d4f3b9a7081:42581 {}] regionserver.Replication(204): 7d4f3b9a7081,42581,1733173817044 started 2024-12-02T21:10:17,455 INFO [RS:0;7d4f3b9a7081:42581 {}] regionserver.HRegionServer(1767): Serving as 7d4f3b9a7081,42581,1733173817044, RpcServer on 7d4f3b9a7081/172.17.0.2:42581, sessionid=0x101992b50010001 2024-12-02T21:10:17,455 DEBUG [RS:0;7d4f3b9a7081:42581 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T21:10:17,455 DEBUG [RS:0;7d4f3b9a7081:42581 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7d4f3b9a7081,42581,1733173817044 2024-12-02T21:10:17,455 DEBUG [RS:0;7d4f3b9a7081:42581 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7d4f3b9a7081,42581,1733173817044' 2024-12-02T21:10:17,455 DEBUG [RS:0;7d4f3b9a7081:42581 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T21:10:17,456 DEBUG [RS:0;7d4f3b9a7081:42581 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T21:10:17,456 DEBUG [RS:0;7d4f3b9a7081:42581 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T21:10:17,456 DEBUG [RS:0;7d4f3b9a7081:42581 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T21:10:17,456 DEBUG [RS:0;7d4f3b9a7081:42581 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7d4f3b9a7081,42581,1733173817044 2024-12-02T21:10:17,456 DEBUG [RS:0;7d4f3b9a7081:42581 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7d4f3b9a7081,42581,1733173817044' 2024-12-02T21:10:17,457 DEBUG [RS:0;7d4f3b9a7081:42581 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T21:10:17,457 DEBUG [RS:0;7d4f3b9a7081:42581 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T21:10:17,458 DEBUG [RS:0;7d4f3b9a7081:42581 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T21:10:17,458 INFO [RS:0;7d4f3b9a7081:42581 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T21:10:17,458 INFO [RS:0;7d4f3b9a7081:42581 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-02T21:10:17,560 INFO [RS:0;7d4f3b9a7081:42581 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7d4f3b9a7081%2C42581%2C1733173817044, suffix=, logDir=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/WALs/7d4f3b9a7081,42581,1733173817044, archiveDir=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/oldWALs, maxLogs=32 2024-12-02T21:10:17,560 INFO [RS:0;7d4f3b9a7081:42581 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C42581%2C1733173817044.1733173817560 2024-12-02T21:10:17,569 INFO [RS:0;7d4f3b9a7081:42581 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/WALs/7d4f3b9a7081,42581,1733173817044/7d4f3b9a7081%2C42581%2C1733173817044.1733173817560 2024-12-02T21:10:17,569 DEBUG [RS:0;7d4f3b9a7081:42581 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46493:46493),(127.0.0.1/127.0.0.1:33051:33051)] 2024-12-02T21:10:17,749 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-02T21:10:17,749 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804 2024-12-02T21:10:17,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741833_1009 (size=32) 2024-12-02T21:10:17,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741833_1009 (size=32) 2024-12-02T21:10:17,761 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:10:17,763 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T21:10:17,765 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T21:10:17,765 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:10:17,766 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:10:17,766 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T21:10:17,769 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T21:10:17,769 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:10:17,769 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:10:17,769 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T21:10:17,770 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T21:10:17,770 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:10:17,771 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:10:17,772 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/hbase/meta/1588230740 2024-12-02T21:10:17,772 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/hbase/meta/1588230740 2024-12-02T21:10:17,773 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T21:10:17,774 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-02T21:10:17,776 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:10:17,777 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=809396, jitterRate=0.029200315475463867}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T21:10:17,777 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-02T21:10:17,777 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-02T21:10:17,777 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-02T21:10:17,777 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-02T21:10:17,777 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T21:10:17,777 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T21:10:17,777 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-02T21:10:17,777 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-02T21:10:17,778 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:10:17,778 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 
2024-12-02T21:10:17,778 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-02T21:10:17,779 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T21:10:17,780 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-02T21:10:17,930 DEBUG [7d4f3b9a7081:44147 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-02T21:10:17,931 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7d4f3b9a7081,42581,1733173817044 2024-12-02T21:10:17,932 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7d4f3b9a7081,42581,1733173817044, state=OPENING 2024-12-02T21:10:17,969 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-02T21:10:18,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:10:18,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42581-0x101992b50010001, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:10:18,012 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=7d4f3b9a7081,42581,1733173817044}] 2024-12-02T21:10:18,012 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:10:18,013 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:10:18,169 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7d4f3b9a7081,42581,1733173817044 2024-12-02T21:10:18,169 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T21:10:18,171 INFO [RS-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40344, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T21:10:18,175 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-02T21:10:18,175 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:10:18,177 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): 
WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7d4f3b9a7081%2C42581%2C1733173817044.meta, suffix=.meta, logDir=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/WALs/7d4f3b9a7081,42581,1733173817044, archiveDir=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/oldWALs, maxLogs=32 2024-12-02T21:10:18,177 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C42581%2C1733173817044.meta.1733173818177.meta 2024-12-02T21:10:18,186 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/WALs/7d4f3b9a7081,42581,1733173817044/7d4f3b9a7081%2C42581%2C1733173817044.meta.1733173818177.meta 2024-12-02T21:10:18,186 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33051:33051),(127.0.0.1/127.0.0.1:46493:46493)] 2024-12-02T21:10:18,186 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:10:18,187 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-02T21:10:18,187 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-02T21:10:18,187 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-02T21:10:18,187 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-02T21:10:18,187 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:10:18,187 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-02T21:10:18,187 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-02T21:10:18,189 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T21:10:18,190 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T21:10:18,190 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:10:18,191 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:10:18,191 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T21:10:18,192 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T21:10:18,192 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:10:18,193 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:10:18,193 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T21:10:18,193 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T21:10:18,193 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:10:18,194 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:10:18,195 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/hbase/meta/1588230740 2024-12-02T21:10:18,196 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/hbase/meta/1588230740 2024-12-02T21:10:18,197 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-02T21:10:18,199 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-02T21:10:18,200 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=795588, jitterRate=0.01164291799068451}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T21:10:18,200 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-02T21:10:18,201 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733173818168 2024-12-02T21:10:18,203 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-02T21:10:18,203 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-02T21:10:18,204 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7d4f3b9a7081,42581,1733173817044 2024-12-02T21:10:18,204 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7d4f3b9a7081,42581,1733173817044, state=OPEN 2024-12-02T21:10:18,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T21:10:18,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42581-0x101992b50010001, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T21:10:18,244 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:10:18,244 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:10:18,246 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-02T21:10:18,246 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=7d4f3b9a7081,42581,1733173817044 in 232 msec 2024-12-02T21:10:18,248 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-02T21:10:18,248 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 468 msec 2024-12-02T21:10:18,250 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 919 msec 2024-12-02T21:10:18,251 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers 
to report in: status=status unset, state=RUNNING, startTime=1733173818250, completionTime=-1 2024-12-02T21:10:18,251 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-02T21:10:18,251 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-02T21:10:18,251 DEBUG [hconnection-0x66846fbd-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T21:10:18,253 INFO [RS-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40348, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T21:10:18,254 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-02T21:10:18,254 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733173878254 2024-12-02T21:10:18,254 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733173938254 2024-12-02T21:10:18,254 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 3 msec 2024-12-02T21:10:18,286 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,44147,1733173816735-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:10:18,286 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,44147,1733173816735-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:10:18,286 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,44147,1733173816735-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:10:18,286 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7d4f3b9a7081:44147, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:10:18,286 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-02T21:10:18,287 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-12-02T21:10:18,287 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T21:10:18,288 DEBUG [master/7d4f3b9a7081:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-02T21:10:18,288 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-02T21:10:18,290 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T21:10:18,290 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:10:18,291 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T21:10:18,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741835_1011 (size=358) 2024-12-02T21:10:18,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741835_1011 (size=358) 2024-12-02T21:10:18,307 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 9565ecffd77110088eb77c1a304ad17a, NAME => 'hbase:namespace,,1733173818287.9565ecffd77110088eb77c1a304ad17a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804 2024-12-02T21:10:18,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741836_1012 (size=42) 2024-12-02T21:10:18,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741836_1012 (size=42) 2024-12-02T21:10:18,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:18,719 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733173818287.9565ecffd77110088eb77c1a304ad17a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:10:18,720 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 9565ecffd77110088eb77c1a304ad17a, disabling compactions & flushes 2024-12-02T21:10:18,720 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733173818287.9565ecffd77110088eb77c1a304ad17a. 2024-12-02T21:10:18,720 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733173818287.9565ecffd77110088eb77c1a304ad17a. 2024-12-02T21:10:18,720 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733173818287.9565ecffd77110088eb77c1a304ad17a. after waiting 0 ms 2024-12-02T21:10:18,720 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733173818287.9565ecffd77110088eb77c1a304ad17a. 2024-12-02T21:10:18,720 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733173818287.9565ecffd77110088eb77c1a304ad17a. 
2024-12-02T21:10:18,720 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 9565ecffd77110088eb77c1a304ad17a: 2024-12-02T21:10:18,722 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T21:10:18,722 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733173818287.9565ecffd77110088eb77c1a304ad17a.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733173818722"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733173818722"}]},"ts":"1733173818722"} 2024-12-02T21:10:18,728 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-02T21:10:18,729 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T21:10:18,729 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733173818729"}]},"ts":"1733173818729"} 2024-12-02T21:10:18,731 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-02T21:10:18,807 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=9565ecffd77110088eb77c1a304ad17a, ASSIGN}] 2024-12-02T21:10:18,808 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=9565ecffd77110088eb77c1a304ad17a, ASSIGN 2024-12-02T21:10:18,810 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=9565ecffd77110088eb77c1a304ad17a, ASSIGN; state=OFFLINE, location=7d4f3b9a7081,42581,1733173817044; forceNewPlan=false, retain=false 2024-12-02T21:10:18,960 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=9565ecffd77110088eb77c1a304ad17a, regionState=OPENING, regionLocation=7d4f3b9a7081,42581,1733173817044 2024-12-02T21:10:18,962 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 9565ecffd77110088eb77c1a304ad17a, server=7d4f3b9a7081,42581,1733173817044}] 2024-12-02T21:10:19,114 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7d4f3b9a7081,42581,1733173817044 2024-12-02T21:10:19,117 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733173818287.9565ecffd77110088eb77c1a304ad17a. 
2024-12-02T21:10:19,118 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 9565ecffd77110088eb77c1a304ad17a, NAME => 'hbase:namespace,,1733173818287.9565ecffd77110088eb77c1a304ad17a.', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:10:19,118 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 9565ecffd77110088eb77c1a304ad17a 2024-12-02T21:10:19,118 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733173818287.9565ecffd77110088eb77c1a304ad17a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:10:19,118 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 9565ecffd77110088eb77c1a304ad17a 2024-12-02T21:10:19,118 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 9565ecffd77110088eb77c1a304ad17a 2024-12-02T21:10:19,119 INFO [StoreOpener-9565ecffd77110088eb77c1a304ad17a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 9565ecffd77110088eb77c1a304ad17a 2024-12-02T21:10:19,121 INFO [StoreOpener-9565ecffd77110088eb77c1a304ad17a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9565ecffd77110088eb77c1a304ad17a columnFamilyName info 2024-12-02T21:10:19,121 DEBUG [StoreOpener-9565ecffd77110088eb77c1a304ad17a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:10:19,122 INFO [StoreOpener-9565ecffd77110088eb77c1a304ad17a-1 {}] regionserver.HStore(327): Store=9565ecffd77110088eb77c1a304ad17a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:10:19,123 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/hbase/namespace/9565ecffd77110088eb77c1a304ad17a 2024-12-02T21:10:19,124 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] 
regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/hbase/namespace/9565ecffd77110088eb77c1a304ad17a 2024-12-02T21:10:19,126 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 9565ecffd77110088eb77c1a304ad17a 2024-12-02T21:10:19,129 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/hbase/namespace/9565ecffd77110088eb77c1a304ad17a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:10:19,129 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 9565ecffd77110088eb77c1a304ad17a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=713470, jitterRate=-0.09277674555778503}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T21:10:19,129 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 9565ecffd77110088eb77c1a304ad17a: 2024-12-02T21:10:19,130 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733173818287.9565ecffd77110088eb77c1a304ad17a., pid=6, masterSystemTime=1733173819114 2024-12-02T21:10:19,132 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733173818287.9565ecffd77110088eb77c1a304ad17a. 2024-12-02T21:10:19,132 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733173818287.9565ecffd77110088eb77c1a304ad17a. 
2024-12-02T21:10:19,133 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=9565ecffd77110088eb77c1a304ad17a, regionState=OPEN, openSeqNum=2, regionLocation=7d4f3b9a7081,42581,1733173817044 2024-12-02T21:10:19,137 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-02T21:10:19,138 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 9565ecffd77110088eb77c1a304ad17a, server=7d4f3b9a7081,42581,1733173817044 in 173 msec 2024-12-02T21:10:19,139 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-02T21:10:19,139 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=9565ecffd77110088eb77c1a304ad17a, ASSIGN in 330 msec 2024-12-02T21:10:19,140 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T21:10:19,140 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733173819140"}]},"ts":"1733173819140"} 2024-12-02T21:10:19,141 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-02T21:10:19,185 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T21:10:19,187 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 898 msec 2024-12-02T21:10:19,190 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-02T21:10:19,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-02T21:10:19,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42581-0x101992b50010001, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:10:19,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:10:19,208 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-02T21:10:19,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-02T21:10:19,231 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 22 msec 2024-12-02T21:10:19,240 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-02T21:10:19,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-02T21:10:19,263 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 23 msec 2024-12-02T21:10:19,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-02T21:10:19,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-02T21:10:19,302 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 2.208sec 2024-12-02T21:10:19,302 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-02T21:10:19,303 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-02T21:10:19,303 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-02T21:10:19,303 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-02T21:10:19,303 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-02T21:10:19,303 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,44147,1733173816735-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T21:10:19,303 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,44147,1733173816735-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-02T21:10:19,306 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-02T21:10:19,306 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-02T21:10:19,307 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,44147,1733173816735-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-02T21:10:19,370 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1ff4d5b3 to 127.0.0.1:59576 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4ff941b7 2024-12-02T21:10:19,378 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10558de6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:10:19,380 DEBUG [hconnection-0x5e90deec-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T21:10:19,383 INFO [RS-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40358, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T21:10:19,385 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=7d4f3b9a7081,44147,1733173816735 2024-12-02T21:10:19,385 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:10:19,388 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-02T21:10:19,390 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T21:10:19,399 INFO [RS-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52726, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T21:10:19,401 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-02T21:10:19,401 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-02T21:10:19,402 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T21:10:19,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-02T21:10:19,405 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T21:10:19,405 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:10:19,405 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 9 2024-12-02T21:10:19,407 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T21:10:19,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:19,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-02T21:10:19,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741837_1013 (size=405) 2024-12-02T21:10:19,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741837_1013 (size=405) 2024-12-02T21:10:19,431 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 092e5fccf24bb21e6cbee0026354f779, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804 2024-12-02T21:10:19,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741838_1014 (size=88) 2024-12-02T21:10:19,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741838_1014 (size=88) 2024-12-02T21:10:19,440 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(894): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:10:19,440 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1681): Closing 092e5fccf24bb21e6cbee0026354f779, disabling compactions & flushes 2024-12-02T21:10:19,440 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779. 2024-12-02T21:10:19,440 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779. 
2024-12-02T21:10:19,440 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779. after waiting 0 ms 2024-12-02T21:10:19,440 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779. 2024-12-02T21:10:19,440 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779. 2024-12-02T21:10:19,440 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1635): Region close journal for 092e5fccf24bb21e6cbee0026354f779: 2024-12-02T21:10:19,442 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T21:10:19,442 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1733173819442"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733173819442"}]},"ts":"1733173819442"} 2024-12-02T21:10:19,444 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-02T21:10:19,445 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T21:10:19,445 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733173819445"}]},"ts":"1733173819445"} 2024-12-02T21:10:19,447 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-12-02T21:10:19,461 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=092e5fccf24bb21e6cbee0026354f779, ASSIGN}] 2024-12-02T21:10:19,462 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=092e5fccf24bb21e6cbee0026354f779, ASSIGN 2024-12-02T21:10:19,463 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=092e5fccf24bb21e6cbee0026354f779, ASSIGN; state=OFFLINE, location=7d4f3b9a7081,42581,1733173817044; forceNewPlan=false, retain=false 
2024-12-02T21:10:19,614 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=092e5fccf24bb21e6cbee0026354f779, regionState=OPENING, regionLocation=7d4f3b9a7081,42581,1733173817044 2024-12-02T21:10:19,616 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 092e5fccf24bb21e6cbee0026354f779, server=7d4f3b9a7081,42581,1733173817044}] 2024-12-02T21:10:19,769 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7d4f3b9a7081,42581,1733173817044 2024-12-02T21:10:19,773 INFO [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779. 2024-12-02T21:10:19,773 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 092e5fccf24bb21e6cbee0026354f779, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779.', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:10:19,773 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 092e5fccf24bb21e6cbee0026354f779 2024-12-02T21:10:19,773 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:10:19,773 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 092e5fccf24bb21e6cbee0026354f779 2024-12-02T21:10:19,773 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 092e5fccf24bb21e6cbee0026354f779 2024-12-02T21:10:19,775 INFO [StoreOpener-092e5fccf24bb21e6cbee0026354f779-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 092e5fccf24bb21e6cbee0026354f779 2024-12-02T21:10:19,776 INFO [StoreOpener-092e5fccf24bb21e6cbee0026354f779-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 092e5fccf24bb21e6cbee0026354f779 columnFamilyName info 2024-12-02T21:10:19,776 DEBUG [StoreOpener-092e5fccf24bb21e6cbee0026354f779-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:10:19,777 INFO [StoreOpener-092e5fccf24bb21e6cbee0026354f779-1 {}] regionserver.HStore(327): Store=092e5fccf24bb21e6cbee0026354f779/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:10:19,777 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779 2024-12-02T21:10:19,778 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779 2024-12-02T21:10:19,780 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 092e5fccf24bb21e6cbee0026354f779 2024-12-02T21:10:19,782 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:10:19,782 INFO [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 092e5fccf24bb21e6cbee0026354f779; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=765570, jitterRate=-0.02652774751186371}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T21:10:19,783 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 092e5fccf24bb21e6cbee0026354f779: 2024-12-02T21:10:19,784 INFO [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779., pid=11, masterSystemTime=1733173819769 2024-12-02T21:10:19,785 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779. 2024-12-02T21:10:19,786 INFO [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779. 
2024-12-02T21:10:19,786 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=092e5fccf24bb21e6cbee0026354f779, regionState=OPEN, openSeqNum=2, regionLocation=7d4f3b9a7081,42581,1733173817044 2024-12-02T21:10:19,790 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-02T21:10:19,790 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 092e5fccf24bb21e6cbee0026354f779, server=7d4f3b9a7081,42581,1733173817044 in 172 msec 2024-12-02T21:10:19,791 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-02T21:10:19,791 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=092e5fccf24bb21e6cbee0026354f779, ASSIGN in 329 msec 2024-12-02T21:10:19,792 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T21:10:19,792 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733173819792"}]},"ts":"1733173819792"} 2024-12-02T21:10:19,794 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-12-02T21:10:19,812 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T21:10:19,813 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 410 msec 2024-12-02T21:10:20,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:21,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:22,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:23,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:23,690 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-02T21:10:23,691 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:23,691 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:23,691 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:23,691 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:23,708 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:23,708 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:23,708 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:23,708 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:23,708 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:23,709 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:23,711 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:23,711 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:23,712 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:23,713 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:10:23,717 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-02T21:10:23,718 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-02T21:10:23,718 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-12-02T21:10:24,008 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-02T21:10:24,008 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-12-02T21:10:24,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:10:24,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta after 68070ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor199.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:10:25,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:26,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:27,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:28,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:29,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-02T21:10:29,416 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 9 completed 2024-12-02T21:10:29,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:29,419 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-02T21:10:29,419 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779. 
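Editor's note: the retry loop above keeps failing because the underlying DFSClient has already been shut down; RecoverLeaseFSUtils calls isFileClosed reflectively, so each "Filesystem closed" IOException surfaces as an InvocationTargetException and the Close-WAL-Writer thread simply retries roughly once a second. As a rough, hypothetical illustration of that pattern only (class, method, and parameter names below are invented for this sketch and are not the actual HBase utility), a lease-recovery retry loop written directly against the public HDFS client API could look like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Hypothetical sketch of a WAL lease-recovery retry loop, similar in spirit to
// what RecoverLeaseFSUtils does in the log above (the real utility invokes these
// HDFS methods via reflection, which is why its failures appear wrapped in
// InvocationTargetException).
public final class LeaseRecoverySketch {
  public static boolean recoverWithRetries(Configuration conf, Path wal,
      int maxAttempts, long pauseMs) throws Exception {
    FileSystem fs = wal.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // nothing to recover on non-HDFS filesystems
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      // recoverLease returns true once the NameNode considers the file closed;
      // isFileClosed is the cheaper follow-up check used between attempts.
      if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
        return true;
      }
      Thread.sleep(pauseMs); // the log shows roughly one retry per second
    }
    return false;
  }
}

If the DistributedFileSystem instance has already been closed, both calls throw IOException("Filesystem closed"), which matches the repeated WARN entries in this section.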
2024-12-02T21:10:29,427 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush hbase:namespace 2024-12-02T21:10:29,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace 2024-12-02T21:10:29,434 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace execute state=FLUSH_TABLE_PREPARE 2024-12-02T21:10:29,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-02T21:10:29,436 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T21:10:29,437 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T21:10:29,595 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7d4f3b9a7081,42581,1733173817044 2024-12-02T21:10:29,596 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42581 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-02T21:10:29,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on hbase:namespace,,1733173818287.9565ecffd77110088eb77c1a304ad17a. 
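Editor's note: the flush logged above arrives as a single client RPC; the master stores a FlushTableProcedure (pid=12), which fans out one FlushRegionProcedure per region (pid=13) and dispatches a FlushRegionCallable to the hosting region server. A minimal client-side sketch of issuing such a flush through the public Admin API is shown below; it assumes a suitable HBase configuration is on the classpath and is illustrative rather than the test's own code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch of the client-side request that produces the
// FlushTableProcedure / FlushRegionProcedure chain seen in the log.
public final class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The flush is executed asynchronously on the master as a procedure;
      // the client-visible operation completes once the procedure finishes.
      admin.flush(TableName.valueOf("hbase:namespace"));
    }
  }
}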
2024-12-02T21:10:29,597 INFO [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 9565ecffd77110088eb77c1a304ad17a 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-02T21:10:29,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/hbase/namespace/9565ecffd77110088eb77c1a304ad17a/.tmp/info/050756151753418588885a357ae303fc is 45, key is default/info:d/1733173819213/Put/seqid=0 2024-12-02T21:10:29,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741839_1015 (size=5037) 2024-12-02T21:10:29,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741839_1015 (size=5037) 2024-12-02T21:10:29,618 INFO [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/hbase/namespace/9565ecffd77110088eb77c1a304ad17a/.tmp/info/050756151753418588885a357ae303fc 2024-12-02T21:10:29,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/hbase/namespace/9565ecffd77110088eb77c1a304ad17a/.tmp/info/050756151753418588885a357ae303fc as hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/hbase/namespace/9565ecffd77110088eb77c1a304ad17a/info/050756151753418588885a357ae303fc 2024-12-02T21:10:29,629 INFO [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/hbase/namespace/9565ecffd77110088eb77c1a304ad17a/info/050756151753418588885a357ae303fc, entries=2, sequenceid=6, filesize=4.9 K 2024-12-02T21:10:29,630 INFO [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 9565ecffd77110088eb77c1a304ad17a in 33ms, sequenceid=6, compaction requested=false 2024-12-02T21:10:29,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 9565ecffd77110088eb77c1a304ad17a: 2024-12-02T21:10:29,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on hbase:namespace,,1733173818287.9565ecffd77110088eb77c1a304ad17a. 
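Editor's note: the flush above writes the memstore to a temporary HFile under the region's .tmp directory and only then commits it into the column family directory, which is why the log shows a separate "Committing ... as ..." step before the file is added to the store. A small, generic sketch of that write-then-rename commit idiom follows; the helper names are illustrative, not HBase's own HRegionFileSystem code.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch (hypothetical names) of committing a fully written
// temporary file into its final directory, so readers never observe a
// partially written store file.
public final class CommitTmpFileSketch {
  public static Path commit(Configuration conf, Path tmpFile, Path familyDir)
      throws IOException {
    FileSystem fs = tmpFile.getFileSystem(conf);
    Path dst = new Path(familyDir, tmpFile.getName());
    // rename is atomic within a single HDFS namespace
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("Failed to commit " + tmpFile + " to " + dst);
    }
    return dst;
  }
}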
2024-12-02T21:10:29,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-02T21:10:29,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-12-02T21:10:29,637 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-02T21:10:29,637 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 198 msec 2024-12-02T21:10:29,639 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace in 209 msec 2024-12-02T21:10:30,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:31,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:32,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:33,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:34,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:35,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:10:36,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:37,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:38,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:39,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:39,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-02T21:10:39,437 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: hbase:namespace, procId: 12 completed 2024-12-02T21:10:39,445 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-02T21:10:39,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-02T21:10:39,447 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-02T21:10:39,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-02T21:10:39,448 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T21:10:39,448 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T21:10:39,600 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7d4f3b9a7081,42581,1733173817044 2024-12-02T21:10:39,600 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42581 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-02T21:10:39,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779. 2024-12-02T21:10:39,601 INFO [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 092e5fccf24bb21e6cbee0026354f779 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-02T21:10:39,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/.tmp/info/137c2209a7114511917267f75628035d is 1080, key is row0001/info:/1733173839440/Put/seqid=0 2024-12-02T21:10:39,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741840_1016 (size=6033) 2024-12-02T21:10:39,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741840_1016 (size=6033) 2024-12-02T21:10:39,659 INFO [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/.tmp/info/137c2209a7114511917267f75628035d 2024-12-02T21:10:39,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/.tmp/info/137c2209a7114511917267f75628035d as hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/info/137c2209a7114511917267f75628035d 2024-12-02T21:10:39,698 INFO [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/info/137c2209a7114511917267f75628035d, entries=1, sequenceid=5, filesize=5.9 K 2024-12-02T21:10:39,701 INFO [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 
092e5fccf24bb21e6cbee0026354f779 in 100ms, sequenceid=5, compaction requested=false 2024-12-02T21:10:39,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 092e5fccf24bb21e6cbee0026354f779: 2024-12-02T21:10:39,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779. 2024-12-02T21:10:39,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-02T21:10:39,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-12-02T21:10:39,708 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-02T21:10:39,708 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 256 msec 2024-12-02T21:10:39,711 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 264 msec 2024-12-02T21:10:40,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:41,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:42,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:43,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:10:44,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:45,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:46,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:46,714 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-02T21:10:47,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:48,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:49,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:10:49,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-02T21:10:49,449 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 14 completed 2024-12-02T21:10:49,458 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-02T21:10:49,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-02T21:10:49,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-02T21:10:49,460 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-02T21:10:49,461 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T21:10:49,461 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T21:10:49,613 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7d4f3b9a7081,42581,1733173817044 2024-12-02T21:10:49,614 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42581 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-02T21:10:49,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779. 
2024-12-02T21:10:49,615 INFO [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 092e5fccf24bb21e6cbee0026354f779 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-02T21:10:49,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/.tmp/info/fd783195938e4caab9c458c3600f4aac is 1080, key is row0002/info:/1733173849450/Put/seqid=0 2024-12-02T21:10:49,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741841_1017 (size=6033) 2024-12-02T21:10:49,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741841_1017 (size=6033) 2024-12-02T21:10:49,636 INFO [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/.tmp/info/fd783195938e4caab9c458c3600f4aac 2024-12-02T21:10:49,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/.tmp/info/fd783195938e4caab9c458c3600f4aac as hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/info/fd783195938e4caab9c458c3600f4aac 2024-12-02T21:10:49,658 INFO [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/info/fd783195938e4caab9c458c3600f4aac, entries=1, sequenceid=9, filesize=5.9 K 2024-12-02T21:10:49,659 INFO [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 092e5fccf24bb21e6cbee0026354f779 in 45ms, sequenceid=9, compaction requested=false 2024-12-02T21:10:49,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 092e5fccf24bb21e6cbee0026354f779: 2024-12-02T21:10:49,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779. 
2024-12-02T21:10:49,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-02T21:10:49,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-12-02T21:10:49,662 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-12-02T21:10:49,662 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 200 msec 2024-12-02T21:10:49,664 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 205 msec 2024-12-02T21:10:50,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:10:51,395 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T21:10:51,397 INFO [RS-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50614, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T21:10:51,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:52,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:53,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:10:54,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:55,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:56,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:57,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:58,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:10:59,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:10:59,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-02T21:10:59,463 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 16 completed 2024-12-02T21:10:59,469 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C42581%2C1733173817044.1733173859468 2024-12-02T21:10:59,478 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/WALs/7d4f3b9a7081,42581,1733173817044/7d4f3b9a7081%2C42581%2C1733173817044.1733173817560 with entries=13, filesize=6.41 KB; new WAL /user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/WALs/7d4f3b9a7081,42581,1733173817044/7d4f3b9a7081%2C42581%2C1733173817044.1733173859468 2024-12-02T21:10:59,479 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33051:33051),(127.0.0.1/127.0.0.1:46493:46493)] 2024-12-02T21:10:59,479 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/WALs/7d4f3b9a7081,42581,1733173817044/7d4f3b9a7081%2C42581%2C1733173817044.1733173817560 is not closed yet, will try archiving it next time 2024-12-02T21:10:59,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741832_1008 (size=6574) 2024-12-02T21:10:59,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741832_1008 (size=6574) 2024-12-02T21:10:59,482 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-02T21:10:59,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-02T21:10:59,484 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-02T21:10:59,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-02T21:10:59,484 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T21:10:59,484 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T21:10:59,636 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7d4f3b9a7081,42581,1733173817044 2024-12-02T21:10:59,637 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42581 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-02T21:10:59,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779. 2024-12-02T21:10:59,638 INFO [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 092e5fccf24bb21e6cbee0026354f779 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-02T21:10:59,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/.tmp/info/68c8235e89a841559fba306a0f39eedb is 1080, key is row0003/info:/1733173859465/Put/seqid=0 2024-12-02T21:10:59,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741843_1019 (size=6033) 2024-12-02T21:10:59,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741843_1019 (size=6033) 2024-12-02T21:10:59,659 INFO [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/.tmp/info/68c8235e89a841559fba306a0f39eedb 2024-12-02T21:10:59,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/.tmp/info/68c8235e89a841559fba306a0f39eedb as hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/info/68c8235e89a841559fba306a0f39eedb 2024-12-02T21:10:59,672 INFO [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/info/68c8235e89a841559fba306a0f39eedb, entries=1, sequenceid=13, filesize=5.9 K 2024-12-02T21:10:59,673 INFO [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 092e5fccf24bb21e6cbee0026354f779 in 35ms, sequenceid=13, compaction requested=true 2024-12-02T21:10:59,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 092e5fccf24bb21e6cbee0026354f779: 2024-12-02T21:10:59,673 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779. 2024-12-02T21:10:59,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-02T21:10:59,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-02T21:10:59,677 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-12-02T21:10:59,677 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 191 msec 2024-12-02T21:10:59,680 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 196 msec 2024-12-02T21:10:59,828 INFO [master/7d4f3b9a7081:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-02T21:10:59,828 INFO [master/7d4f3b9a7081:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-02T21:11:00,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:01,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:02,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:03,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:11:04,118 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 9565ecffd77110088eb77c1a304ad17a, had cached 0 bytes from a total of 5037 2024-12-02T21:11:04,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:04,774 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 092e5fccf24bb21e6cbee0026354f779, had cached 0 bytes from a total of 18099 2024-12-02T21:11:05,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:06,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:11:07,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:08,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:09,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:11:09,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-02T21:11:09,485 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 18 completed 2024-12-02T21:11:09,485 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T21:11:09,486 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T21:11:09,486 DEBUG [Time-limited test {}] regionserver.HStore(1540): 092e5fccf24bb21e6cbee0026354f779/info is initiating minor compaction (all files) 2024-12-02T21:11:09,486 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T21:11:09,486 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:11:09,487 INFO [Time-limited test {}] regionserver.HRegion(2351): Starting compaction of 092e5fccf24bb21e6cbee0026354f779/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779. 2024-12-02T21:11:09,487 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/info/137c2209a7114511917267f75628035d, hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/info/fd783195938e4caab9c458c3600f4aac, hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/info/68c8235e89a841559fba306a0f39eedb] into tmpdir=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/.tmp, totalSize=17.7 K 2024-12-02T21:11:09,487 DEBUG [Time-limited test {}] compactions.Compactor(224): Compacting 137c2209a7114511917267f75628035d, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733173839440 2024-12-02T21:11:09,488 DEBUG [Time-limited test {}] compactions.Compactor(224): Compacting fd783195938e4caab9c458c3600f4aac, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1733173849450 2024-12-02T21:11:09,488 DEBUG [Time-limited test {}] compactions.Compactor(224): Compacting 68c8235e89a841559fba306a0f39eedb, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733173859465 2024-12-02T21:11:09,500 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 092e5fccf24bb21e6cbee0026354f779#info#compaction#32 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T21:11:09,500 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/.tmp/info/7ba8f4c93cbe4a18943fc57ef602781d is 1080, key is row0001/info:/1733173839440/Put/seqid=0 2024-12-02T21:11:09,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741844_1020 (size=8296) 2024-12-02T21:11:09,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741844_1020 (size=8296) 2024-12-02T21:11:09,515 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/.tmp/info/7ba8f4c93cbe4a18943fc57ef602781d as hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/info/7ba8f4c93cbe4a18943fc57ef602781d 2024-12-02T21:11:09,521 INFO [Time-limited test {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 092e5fccf24bb21e6cbee0026354f779/info of 092e5fccf24bb21e6cbee0026354f779 into 7ba8f4c93cbe4a18943fc57ef602781d(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T21:11:09,521 DEBUG [Time-limited test {}] regionserver.HRegion(2381): Compaction status journal for 092e5fccf24bb21e6cbee0026354f779: 2024-12-02T21:11:09,524 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C42581%2C1733173817044.1733173869524 2024-12-02T21:11:09,539 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/WALs/7d4f3b9a7081,42581,1733173817044/7d4f3b9a7081%2C42581%2C1733173817044.1733173859468 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/WALs/7d4f3b9a7081,42581,1733173817044/7d4f3b9a7081%2C42581%2C1733173817044.1733173869524 2024-12-02T21:11:09,539 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33051:33051),(127.0.0.1/127.0.0.1:46493:46493)] 2024-12-02T21:11:09,539 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/WALs/7d4f3b9a7081,42581,1733173817044/7d4f3b9a7081%2C42581%2C1733173817044.1733173859468 is not closed yet, will try archiving it next time 2024-12-02T21:11:09,540 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/WALs/7d4f3b9a7081,42581,1733173817044/7d4f3b9a7081%2C42581%2C1733173817044.1733173817560 to hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/oldWALs/7d4f3b9a7081%2C42581%2C1733173817044.1733173817560 2024-12-02T21:11:09,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741842_1018 (size=2520) 2024-12-02T21:11:09,541 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741842_1018 (size=2520) 2024-12-02T21:11:09,544 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-02T21:11:09,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-02T21:11:09,545 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-02T21:11:09,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-02T21:11:09,546 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T21:11:09,546 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T21:11:09,698 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7d4f3b9a7081,42581,1733173817044 2024-12-02T21:11:09,699 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42581 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-02T21:11:09,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779. 
2024-12-02T21:11:09,699 INFO [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 092e5fccf24bb21e6cbee0026354f779 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-02T21:11:09,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/.tmp/info/54ca9f699f9749669c716a2485051429 is 1080, key is row0000/info:/1733173869522/Put/seqid=0 2024-12-02T21:11:09,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741846_1022 (size=6033) 2024-12-02T21:11:09,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741846_1022 (size=6033) 2024-12-02T21:11:09,712 INFO [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/.tmp/info/54ca9f699f9749669c716a2485051429 2024-12-02T21:11:09,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/.tmp/info/54ca9f699f9749669c716a2485051429 as hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/info/54ca9f699f9749669c716a2485051429 2024-12-02T21:11:09,727 INFO [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/info/54ca9f699f9749669c716a2485051429, entries=1, sequenceid=18, filesize=5.9 K 2024-12-02T21:11:09,728 INFO [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 092e5fccf24bb21e6cbee0026354f779 in 29ms, sequenceid=18, compaction requested=false 2024-12-02T21:11:09,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 092e5fccf24bb21e6cbee0026354f779: 2024-12-02T21:11:09,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779. 
2024-12-02T21:11:09,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-02T21:11:09,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-02T21:11:09,733 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-02T21:11:09,733 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 184 msec 2024-12-02T21:11:09,736 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 189 msec 2024-12-02T21:11:10,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:11,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:12,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:13,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:14,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:15,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:11:16,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:16,714 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T21:11:17,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:18,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:11:19,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:11:19,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44147 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-02T21:11:19,547 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 20 completed 2024-12-02T21:11:19,550 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C42581%2C1733173817044.1733173879550 2024-12-02T21:11:19,560 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/WALs/7d4f3b9a7081,42581,1733173817044/7d4f3b9a7081%2C42581%2C1733173817044.1733173869524 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/WALs/7d4f3b9a7081,42581,1733173817044/7d4f3b9a7081%2C42581%2C1733173817044.1733173879550 2024-12-02T21:11:19,560 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33051:33051),(127.0.0.1/127.0.0.1:46493:46493)] 2024-12-02T21:11:19,560 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/WALs/7d4f3b9a7081,42581,1733173817044/7d4f3b9a7081%2C42581%2C1733173817044.1733173869524 is not closed yet, will try archiving it next time 2024-12-02T21:11:19,560 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/WALs/7d4f3b9a7081,42581,1733173817044/7d4f3b9a7081%2C42581%2C1733173817044.1733173859468 to hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/oldWALs/7d4f3b9a7081%2C42581%2C1733173817044.1733173859468 2024-12-02T21:11:19,561 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-02T21:11:19,561 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-02T21:11:19,561 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1ff4d5b3 to 127.0.0.1:59576 2024-12-02T21:11:19,561 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:11:19,561 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-02T21:11:19,561 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1695405081, stopped=false 2024-12-02T21:11:19,561 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=7d4f3b9a7081,44147,1733173816735 2024-12-02T21:11:19,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741845_1021 (size=2026) 2024-12-02T21:11:19,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741845_1021 (size=2026) 2024-12-02T21:11:19,583 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-02T21:11:19,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42581-0x101992b50010001, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T21:11:19,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44147-0x101992b50010000, 
quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T21:11:19,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42581-0x101992b50010001, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:11:19,583 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:11:19,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:11:19,583 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7d4f3b9a7081,42581,1733173817044' ***** 2024-12-02T21:11:19,583 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-02T21:11:19,584 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42581-0x101992b50010001, quorum=127.0.0.1:59576, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:11:19,584 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:11:19,584 INFO [RS:0;7d4f3b9a7081:42581 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T21:11:19,584 INFO [RS:0;7d4f3b9a7081:42581 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T21:11:19,584 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-02T21:11:19,584 INFO [RS:0;7d4f3b9a7081:42581 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T21:11:19,584 INFO [RS:0;7d4f3b9a7081:42581 {}] regionserver.HRegionServer(3579): Received CLOSE for 9565ecffd77110088eb77c1a304ad17a 2024-12-02T21:11:19,584 INFO [RS:0;7d4f3b9a7081:42581 {}] regionserver.HRegionServer(3579): Received CLOSE for 092e5fccf24bb21e6cbee0026354f779 2024-12-02T21:11:19,584 INFO [RS:0;7d4f3b9a7081:42581 {}] regionserver.HRegionServer(1224): stopping server 7d4f3b9a7081,42581,1733173817044 2024-12-02T21:11:19,584 DEBUG [RS:0;7d4f3b9a7081:42581 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:11:19,584 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 9565ecffd77110088eb77c1a304ad17a, disabling compactions & flushes 2024-12-02T21:11:19,584 INFO [RS:0;7d4f3b9a7081:42581 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T21:11:19,584 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733173818287.9565ecffd77110088eb77c1a304ad17a. 2024-12-02T21:11:19,584 INFO [RS:0;7d4f3b9a7081:42581 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T21:11:19,584 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733173818287.9565ecffd77110088eb77c1a304ad17a. 
2024-12-02T21:11:19,584 INFO [RS:0;7d4f3b9a7081:42581 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-02T21:11:19,584 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733173818287.9565ecffd77110088eb77c1a304ad17a. after waiting 0 ms 2024-12-02T21:11:19,584 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733173818287.9565ecffd77110088eb77c1a304ad17a. 2024-12-02T21:11:19,584 INFO [RS:0;7d4f3b9a7081:42581 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-02T21:11:19,585 INFO [RS:0;7d4f3b9a7081:42581 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close 2024-12-02T21:11:19,585 DEBUG [RS:0;7d4f3b9a7081:42581 {}] regionserver.HRegionServer(1603): Online Regions={9565ecffd77110088eb77c1a304ad17a=hbase:namespace,,1733173818287.9565ecffd77110088eb77c1a304ad17a., 1588230740=hbase:meta,,1.1588230740, 092e5fccf24bb21e6cbee0026354f779=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779.} 2024-12-02T21:11:19,585 DEBUG [RS:0;7d4f3b9a7081:42581 {}] regionserver.HRegionServer(1629): Waiting on 092e5fccf24bb21e6cbee0026354f779, 1588230740, 9565ecffd77110088eb77c1a304ad17a 2024-12-02T21:11:19,585 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-02T21:11:19,585 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-02T21:11:19,585 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-02T21:11:19,585 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T21:11:19,585 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T21:11:19,585 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=3.05 KB heapSize=5.55 KB 2024-12-02T21:11:19,588 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/hbase/namespace/9565ecffd77110088eb77c1a304ad17a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T21:11:19,589 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733173818287.9565ecffd77110088eb77c1a304ad17a. 2024-12-02T21:11:19,589 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 9565ecffd77110088eb77c1a304ad17a: 2024-12-02T21:11:19,589 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733173818287.9565ecffd77110088eb77c1a304ad17a. 
2024-12-02T21:11:19,589 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 092e5fccf24bb21e6cbee0026354f779, disabling compactions & flushes 2024-12-02T21:11:19,589 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779. 2024-12-02T21:11:19,589 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779. 2024-12-02T21:11:19,589 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779. after waiting 0 ms 2024-12-02T21:11:19,589 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779. 2024-12-02T21:11:19,589 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 092e5fccf24bb21e6cbee0026354f779 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-02T21:11:19,593 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/.tmp/info/074933310a0146e094e3a599a701435f is 1080, key is row0001/info:/1733173879548/Put/seqid=0 2024-12-02T21:11:19,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741848_1024 (size=6033) 2024-12-02T21:11:19,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741848_1024 (size=6033) 2024-12-02T21:11:19,600 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/.tmp/info/074933310a0146e094e3a599a701435f 2024-12-02T21:11:19,604 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/hbase/meta/1588230740/.tmp/info/76982ba8cfd94e9fba81ea57750aa622 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779./info:regioninfo/1733173819786/Put/seqid=0 2024-12-02T21:11:19,606 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/.tmp/info/074933310a0146e094e3a599a701435f as hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/info/074933310a0146e094e3a599a701435f 2024-12-02T21:11:19,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741849_1025 (size=8430) 2024-12-02T21:11:19,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741849_1025 (size=8430) 2024-12-02T21:11:19,609 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.79 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/hbase/meta/1588230740/.tmp/info/76982ba8cfd94e9fba81ea57750aa622 2024-12-02T21:11:19,611 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/info/074933310a0146e094e3a599a701435f, entries=1, sequenceid=22, filesize=5.9 K 2024-12-02T21:11:19,612 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 092e5fccf24bb21e6cbee0026354f779 in 23ms, sequenceid=22, compaction requested=true 2024-12-02T21:11:19,613 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/info/137c2209a7114511917267f75628035d, hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/info/fd783195938e4caab9c458c3600f4aac, hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/info/68c8235e89a841559fba306a0f39eedb] to archive 2024-12-02T21:11:19,614 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-02T21:11:19,615 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/info/137c2209a7114511917267f75628035d to hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/info/137c2209a7114511917267f75628035d 2024-12-02T21:11:19,616 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/info/fd783195938e4caab9c458c3600f4aac to hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/info/fd783195938e4caab9c458c3600f4aac 2024-12-02T21:11:19,617 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/info/68c8235e89a841559fba306a0f39eedb to hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/info/68c8235e89a841559fba306a0f39eedb 2024-12-02T21:11:19,621 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/092e5fccf24bb21e6cbee0026354f779/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-12-02T21:11:19,622 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779. 2024-12-02T21:11:19,622 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 092e5fccf24bb21e6cbee0026354f779: 2024-12-02T21:11:19,622 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733173819401.092e5fccf24bb21e6cbee0026354f779. 
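HFileArchiver has just moved the three compacted store files into the archive tree. An illustrative listing of that directory with the plain Hadoop FileSystem API; the NameNode address and path are copied from the records above, everything else is an assumption:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class ListArchivedStoreFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:43731"); // NameNode used by this run
    Path archivedInfo = new Path("/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/"
        + "archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/"
        + "092e5fccf24bb21e6cbee0026354f779/info");
    try (FileSystem fs = FileSystem.get(conf)) {
      // Each entry should be one of the archived HFiles named above.
      for (FileStatus status : fs.listStatus(archivedInfo)) {
        System.out.println(status.getPath().getName() + "\t" + status.getLen() + " bytes");
      }
    }
  }
}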
2024-12-02T21:11:19,628 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/hbase/meta/1588230740/.tmp/table/cf5ddd316d79435896f51f3c0a63743e is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1733173819792/Put/seqid=0 2024-12-02T21:11:19,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741850_1026 (size=5532) 2024-12-02T21:11:19,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741850_1026 (size=5532) 2024-12-02T21:11:19,632 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=264 B at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/hbase/meta/1588230740/.tmp/table/cf5ddd316d79435896f51f3c0a63743e 2024-12-02T21:11:19,637 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/hbase/meta/1588230740/.tmp/info/76982ba8cfd94e9fba81ea57750aa622 as hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/hbase/meta/1588230740/info/76982ba8cfd94e9fba81ea57750aa622 2024-12-02T21:11:19,643 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/hbase/meta/1588230740/info/76982ba8cfd94e9fba81ea57750aa622, entries=20, sequenceid=14, filesize=8.2 K 2024-12-02T21:11:19,644 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/hbase/meta/1588230740/.tmp/table/cf5ddd316d79435896f51f3c0a63743e as hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/hbase/meta/1588230740/table/cf5ddd316d79435896f51f3c0a63743e 2024-12-02T21:11:19,649 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/hbase/meta/1588230740/table/cf5ddd316d79435896f51f3c0a63743e, entries=4, sequenceid=14, filesize=5.4 K 2024-12-02T21:11:19,650 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~3.05 KB/3122, heapSize ~5.27 KB/5400, currentSize=0 B/0 for 1588230740 in 65ms, sequenceid=14, compaction requested=false 2024-12-02T21:11:19,654 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/data/hbase/meta/1588230740/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=1 2024-12-02T21:11:19,655 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 
2024-12-02T21:11:19,655 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-02T21:11:19,655 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-02T21:11:19,655 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-02T21:11:19,731 DEBUG [master/7d4f3b9a7081:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 9565ecffd77110088eb77c1a304ad17a changed from -1.0 to 0.0, refreshing cache 2024-12-02T21:11:19,785 INFO [RS:0;7d4f3b9a7081:42581 {}] regionserver.HRegionServer(1250): stopping server 7d4f3b9a7081,42581,1733173817044; all regions closed. 2024-12-02T21:11:19,785 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/WALs/7d4f3b9a7081,42581,1733173817044 2024-12-02T21:11:19,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741834_1010 (size=4570) 2024-12-02T21:11:19,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741834_1010 (size=4570) 2024-12-02T21:11:19,790 DEBUG [RS:0;7d4f3b9a7081:42581 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/oldWALs 2024-12-02T21:11:19,790 INFO [RS:0;7d4f3b9a7081:42581 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 7d4f3b9a7081%2C42581%2C1733173817044.meta:.meta(num 1733173818177) 2024-12-02T21:11:19,790 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/WALs/7d4f3b9a7081,42581,1733173817044 2024-12-02T21:11:19,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741847_1023 (size=1545) 2024-12-02T21:11:19,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741847_1023 (size=1545) 2024-12-02T21:11:19,796 DEBUG [RS:0;7d4f3b9a7081:42581 {}] wal.AbstractFSWAL(1071): Moved 2 WAL file(s) to /user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/oldWALs 2024-12-02T21:11:19,796 INFO [RS:0;7d4f3b9a7081:42581 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 7d4f3b9a7081%2C42581%2C1733173817044:(num 1733173879550) 2024-12-02T21:11:19,796 DEBUG [RS:0;7d4f3b9a7081:42581 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:11:19,796 INFO [RS:0;7d4f3b9a7081:42581 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T21:11:19,796 INFO [RS:0;7d4f3b9a7081:42581 {}] hbase.ChoreService(370): Chore service for: regionserver/7d4f3b9a7081:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-02T21:11:19,796 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-02T21:11:19,797 INFO [RS:0;7d4f3b9a7081:42581 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:42581 2024-12-02T21:11:19,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T21:11:19,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42581-0x101992b50010001, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7d4f3b9a7081,42581,1733173817044 2024-12-02T21:11:19,842 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7d4f3b9a7081,42581,1733173817044] 2024-12-02T21:11:19,842 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 7d4f3b9a7081,42581,1733173817044; numProcessing=1 2024-12-02T21:11:19,903 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/7d4f3b9a7081,42581,1733173817044 already deleted, retry=false 2024-12-02T21:11:19,903 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 7d4f3b9a7081,42581,1733173817044 expired; onlineServers=0 2024-12-02T21:11:19,903 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7d4f3b9a7081,44147,1733173816735' ***** 2024-12-02T21:11:19,903 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-02T21:11:19,904 DEBUG [M:0;7d4f3b9a7081:44147 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1bbdde7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7d4f3b9a7081/172.17.0.2:0 2024-12-02T21:11:19,904 INFO [M:0;7d4f3b9a7081:44147 {}] regionserver.HRegionServer(1224): stopping server 7d4f3b9a7081,44147,1733173816735 2024-12-02T21:11:19,904 INFO [M:0;7d4f3b9a7081:44147 {}] regionserver.HRegionServer(1250): stopping server 7d4f3b9a7081,44147,1733173816735; all regions closed. 2024-12-02T21:11:19,904 DEBUG [M:0;7d4f3b9a7081:44147 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:11:19,905 DEBUG [M:0;7d4f3b9a7081:44147 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-02T21:11:19,905 DEBUG [M:0;7d4f3b9a7081:44147 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-02T21:11:19,905 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
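The master notices the region server's exit because the ephemeral znode under /hbase/rs disappears (the NodeDeleted event and RegionServerTracker records above). A minimal plain-ZooKeeper sketch of watching that node; the quorum address and znode path are copied from the log, the rest is assumed and is not the HBase ZKWatcher used here:

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public final class WatchRegionServerZNode {
  public static void main(String[] args) throws Exception {
    CountDownLatch deleted = new CountDownLatch(1);
    // Quorum taken from the records above.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59576", 30_000, event -> { });
    // One-shot watch on the region server's ephemeral registration node.
    zk.exists("/hbase/rs/7d4f3b9a7081,42581,1733173817044", event -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
        deleted.countDown();
      }
    });
    deleted.await(); // fires when the region server shuts down or its session expires
    zk.close();
  }
}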
2024-12-02T21:11:19,905 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.large.0-1733173817335 {}] cleaner.HFileCleaner(306): Exit Thread[master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.large.0-1733173817335,5,FailOnTimeoutGroup] 2024-12-02T21:11:19,905 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.small.0-1733173817338 {}] cleaner.HFileCleaner(306): Exit Thread[master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.small.0-1733173817338,5,FailOnTimeoutGroup] 2024-12-02T21:11:19,906 INFO [M:0;7d4f3b9a7081:44147 {}] hbase.ChoreService(370): Chore service for: master/7d4f3b9a7081:0 had [] on shutdown 2024-12-02T21:11:19,907 DEBUG [M:0;7d4f3b9a7081:44147 {}] master.HMaster(1733): Stopping service threads 2024-12-02T21:11:19,907 INFO [M:0;7d4f3b9a7081:44147 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-02T21:11:19,908 INFO [M:0;7d4f3b9a7081:44147 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-02T21:11:19,908 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-02T21:11:19,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-02T21:11:19,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:11:19,950 DEBUG [M:0;7d4f3b9a7081:44147 {}] zookeeper.ZKUtil(347): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-02T21:11:19,950 WARN [M:0;7d4f3b9a7081:44147 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-02T21:11:19,950 INFO [M:0;7d4f3b9a7081:44147 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-02T21:11:19,950 INFO [M:0;7d4f3b9a7081:44147 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-02T21:11:19,950 DEBUG [M:0;7d4f3b9a7081:44147 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T21:11:19,950 INFO [M:0;7d4f3b9a7081:44147 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:11:19,950 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T21:11:19,951 DEBUG [M:0;7d4f3b9a7081:44147 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:11:19,951 DEBUG [M:0;7d4f3b9a7081:44147 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T21:11:19,951 DEBUG [M:0;7d4f3b9a7081:44147 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-02T21:11:19,951 INFO [M:0;7d4f3b9a7081:44147 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=65.02 KB heapSize=81.62 KB 2024-12-02T21:11:19,973 DEBUG [M:0;7d4f3b9a7081:44147 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9615c9ebd61a4c3c9b610b4be50eaac3 is 82, key is hbase:meta,,1/info:regioninfo/1733173818203/Put/seqid=0 2024-12-02T21:11:19,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741851_1027 (size=5672) 2024-12-02T21:11:19,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741851_1027 (size=5672) 2024-12-02T21:11:19,978 INFO [M:0;7d4f3b9a7081:44147 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9615c9ebd61a4c3c9b610b4be50eaac3 2024-12-02T21:11:19,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42581-0x101992b50010001, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:11:19,992 INFO [RS:0;7d4f3b9a7081:42581 {}] regionserver.HRegionServer(1307): Exiting; stopping=7d4f3b9a7081,42581,1733173817044; zookeeper connection closed. 2024-12-02T21:11:19,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42581-0x101992b50010001, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:11:19,992 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2fde63ea {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2fde63ea 2024-12-02T21:11:19,992 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-02T21:11:19,996 DEBUG [M:0;7d4f3b9a7081:44147 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ce98bb2a046d4c66ad3d3dabd2b4ade1 is 798, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1733173819813/Put/seqid=0 2024-12-02T21:11:20,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741852_1028 (size=8350) 2024-12-02T21:11:20,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741852_1028 (size=8350) 2024-12-02T21:11:20,003 INFO [M:0;7d4f3b9a7081:44147 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.42 KB at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ce98bb2a046d4c66ad3d3dabd2b4ade1 2024-12-02T21:11:20,008 INFO [M:0;7d4f3b9a7081:44147 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ce98bb2a046d4c66ad3d3dabd2b4ade1 2024-12-02T21:11:20,021 
DEBUG [M:0;7d4f3b9a7081:44147 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3061cf3da33646148765978234376c27 is 69, key is 7d4f3b9a7081,42581,1733173817044/rs:state/1733173817402/Put/seqid=0 2024-12-02T21:11:20,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741853_1029 (size=5156) 2024-12-02T21:11:20,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741853_1029 (size=5156) 2024-12-02T21:11:20,028 INFO [M:0;7d4f3b9a7081:44147 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3061cf3da33646148765978234376c27 2024-12-02T21:11:20,045 DEBUG [M:0;7d4f3b9a7081:44147 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4efa399c35534a7c9d616479294b1443 is 52, key is load_balancer_on/state:d/1733173819387/Put/seqid=0 2024-12-02T21:11:20,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741854_1030 (size=5056) 2024-12-02T21:11:20,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741854_1030 (size=5056) 2024-12-02T21:11:20,050 INFO [M:0;7d4f3b9a7081:44147 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4efa399c35534a7c9d616479294b1443 2024-12-02T21:11:20,055 DEBUG [M:0;7d4f3b9a7081:44147 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9615c9ebd61a4c3c9b610b4be50eaac3 as hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9615c9ebd61a4c3c9b610b4be50eaac3 2024-12-02T21:11:20,060 INFO [M:0;7d4f3b9a7081:44147 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9615c9ebd61a4c3c9b610b4be50eaac3, entries=8, sequenceid=184, filesize=5.5 K 2024-12-02T21:11:20,061 DEBUG [M:0;7d4f3b9a7081:44147 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ce98bb2a046d4c66ad3d3dabd2b4ade1 as hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ce98bb2a046d4c66ad3d3dabd2b4ade1 2024-12-02T21:11:20,065 INFO [M:0;7d4f3b9a7081:44147 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom 
(CompoundBloomFilter) metadata for ce98bb2a046d4c66ad3d3dabd2b4ade1 2024-12-02T21:11:20,065 INFO [M:0;7d4f3b9a7081:44147 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ce98bb2a046d4c66ad3d3dabd2b4ade1, entries=21, sequenceid=184, filesize=8.2 K 2024-12-02T21:11:20,066 DEBUG [M:0;7d4f3b9a7081:44147 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3061cf3da33646148765978234376c27 as hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3061cf3da33646148765978234376c27 2024-12-02T21:11:20,071 INFO [M:0;7d4f3b9a7081:44147 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3061cf3da33646148765978234376c27, entries=1, sequenceid=184, filesize=5.0 K 2024-12-02T21:11:20,072 DEBUG [M:0;7d4f3b9a7081:44147 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4efa399c35534a7c9d616479294b1443 as hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4efa399c35534a7c9d616479294b1443 2024-12-02T21:11:20,077 INFO [M:0;7d4f3b9a7081:44147 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43731/user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4efa399c35534a7c9d616479294b1443, entries=1, sequenceid=184, filesize=4.9 K 2024-12-02T21:11:20,078 INFO [M:0;7d4f3b9a7081:44147 {}] regionserver.HRegion(3040): Finished flush of dataSize ~65.02 KB/66583, heapSize ~81.55 KB/83512, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=184, compaction requested=false 2024-12-02T21:11:20,079 INFO [M:0;7d4f3b9a7081:44147 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:11:20,079 DEBUG [M:0;7d4f3b9a7081:44147 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-02T21:11:20,080 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/7fb631ac-f516-8a5a-0a27-1cd781752804/MasterData/WALs/7d4f3b9a7081,44147,1733173816735 2024-12-02T21:11:20,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34831 is added to blk_1073741830_1006 (size=79104) 2024-12-02T21:11:20,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44765 is added to blk_1073741830_1006 (size=79104) 2024-12-02T21:11:20,082 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-02T21:11:20,082 INFO [M:0;7d4f3b9a7081:44147 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 
2024-12-02T21:11:20,082 INFO [M:0;7d4f3b9a7081:44147 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:44147 2024-12-02T21:11:20,102 DEBUG [M:0;7d4f3b9a7081:44147 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/7d4f3b9a7081,44147,1733173816735 already deleted, retry=false 2024-12-02T21:11:20,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:11:20,228 INFO [M:0;7d4f3b9a7081:44147 {}] regionserver.HRegionServer(1307): Exiting; stopping=7d4f3b9a7081,44147,1733173816735; zookeeper connection closed. 2024-12-02T21:11:20,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44147-0x101992b50010000, quorum=127.0.0.1:59576, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:11:20,235 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b409b3f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:11:20,235 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@546a7485{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:11:20,235 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:11:20,236 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7d251d77{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:11:20,236 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@494f0f0c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/hadoop.log.dir/,STOPPED} 2024-12-02T21:11:20,238 WARN [BP-2001958662-172.17.0.2-1733173815169 heartbeating to localhost/127.0.0.1:43731 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:11:20,238 WARN [BP-2001958662-172.17.0.2-1733173815169 heartbeating to localhost/127.0.0.1:43731 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2001958662-172.17.0.2-1733173815169 (Datanode Uuid 69a2293b-2994-49b5-a07c-6712e747be0a) service to localhost/127.0.0.1:43731 2024-12-02T21:11:20,238 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T21:11:20,238 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:11:20,239 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/cluster_6c800ea8-c534-eaaf-9cff-a3cfa2451d6b/dfs/data/data3/current/BP-2001958662-172.17.0.2-1733173815169 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:11:20,239 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/cluster_6c800ea8-c534-eaaf-9cff-a3cfa2451d6b/dfs/data/data4/current/BP-2001958662-172.17.0.2-1733173815169 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:11:20,239 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:11:20,241 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6e7d6ea1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:11:20,241 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1fd419fc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:11:20,241 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:11:20,241 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32ab8a68{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:11:20,241 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6186507e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/hadoop.log.dir/,STOPPED} 2024-12-02T21:11:20,243 WARN [BP-2001958662-172.17.0.2-1733173815169 heartbeating to localhost/127.0.0.1:43731 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:11:20,243 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
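The ResourceChecker summary a few records below compares thread counts, open file descriptors and system load before and after the test. A rough, illustrative way to read the same numbers via JMX; this is not the HBase ResourceChecker itself, and the Unix-only cast is an assumption about the platform:

import java.lang.management.ManagementFactory;
import java.lang.management.OperatingSystemMXBean;
import java.lang.management.ThreadMXBean;
import com.sun.management.UnixOperatingSystemMXBean;

public final class ResourceSnapshotSketch {
  public static void main(String[] args) {
    ThreadMXBean threads = ManagementFactory.getThreadMXBean();
    OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean();
    System.out.println("Thread=" + threads.getThreadCount());
    if (os instanceof UnixOperatingSystemMXBean) {
      // File-descriptor counts are only exposed on Unix-like platforms.
      System.out.println("OpenFileDescriptor="
          + ((UnixOperatingSystemMXBean) os).getOpenFileDescriptorCount());
      System.out.println("MaxFileDescriptor="
          + ((UnixOperatingSystemMXBean) os).getMaxFileDescriptorCount());
    }
    System.out.println("SystemLoadAverage=" + os.getSystemLoadAverage());
  }
}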
2024-12-02T21:11:20,243 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:11:20,243 WARN [BP-2001958662-172.17.0.2-1733173815169 heartbeating to localhost/127.0.0.1:43731 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2001958662-172.17.0.2-1733173815169 (Datanode Uuid e1c08ab6-f856-4cea-a27b-d21956f3ae93) service to localhost/127.0.0.1:43731 2024-12-02T21:11:20,243 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/cluster_6c800ea8-c534-eaaf-9cff-a3cfa2451d6b/dfs/data/data1/current/BP-2001958662-172.17.0.2-1733173815169 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:11:20,243 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/cluster_6c800ea8-c534-eaaf-9cff-a3cfa2451d6b/dfs/data/data2/current/BP-2001958662-172.17.0.2-1733173815169 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:11:20,243 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:11:20,249 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2c32686b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T21:11:20,250 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@118f9356{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:11:20,250 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:11:20,250 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ac584cd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:11:20,250 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40c6fefe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/hadoop.log.dir/,STOPPED} 2024-12-02T21:11:20,256 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-02T21:11:20,274 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-02T21:11:20,280 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=111 (was 102) - Thread LEAK? -, OpenFileDescriptor=464 (was 452) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=109 (was 54) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=7069 (was 7929) 2024-12-02T21:11:20,285 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=112, OpenFileDescriptor=464, MaxFileDescriptor=1048576, SystemLoadAverage=109, ProcessCount=11, AvailableMemoryMB=7069 2024-12-02T21:11:20,285 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-02T21:11:20,285 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/hadoop.log.dir so I do NOT create it in target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4 2024-12-02T21:11:20,285 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0d70a6d-1c1b-d5f4-3b2c-c3eb34b63681/hadoop.tmp.dir so I do NOT create it in target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4 2024-12-02T21:11:20,285 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/cluster_fa39a46f-f7b4-88f9-3eda-0cc056d788d5, deleteOnExit=true 2024-12-02T21:11:20,285 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-02T21:11:20,285 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/test.cache.data in system properties and HBase conf 2024-12-02T21:11:20,285 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/hadoop.tmp.dir in system properties and HBase conf 2024-12-02T21:11:20,285 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/hadoop.log.dir in system properties and HBase conf 2024-12-02T21:11:20,285 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-02T21:11:20,285 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-02T21:11:20,286 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-02T21:11:20,286 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-02T21:11:20,286 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-02T21:11:20,286 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-02T21:11:20,286 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-02T21:11:20,286 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T21:11:20,286 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-02T21:11:20,286 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-02T21:11:20,286 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T21:11:20,286 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T21:11:20,286 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-02T21:11:20,286 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/nfs.dump.dir in system properties and HBase conf 2024-12-02T21:11:20,286 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/java.io.tmpdir in system properties and HBase conf 2024-12-02T21:11:20,286 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T21:11:20,287 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-02T21:11:20,287 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-02T21:11:20,300 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T21:11:20,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:11:20,517 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:11:20,521 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:11:20,522 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:11:20,522 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:11:20,522 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T21:11:20,523 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:11:20,523 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@450ce414{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:11:20,523 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@71b9bf54{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:11:20,615 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6de7bcd8{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/java.io.tmpdir/jetty-localhost-32769-hadoop-hdfs-3_4_1-tests_jar-_-any-18318664080452710251/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T21:11:20,616 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@69205fd0{HTTP/1.1, (http/1.1)}{localhost:32769} 2024-12-02T21:11:20,616 INFO [Time-limited test {}] server.Server(415): Started @291996ms 2024-12-02T21:11:20,630 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T21:11:20,842 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:11:20,846 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:11:20,847 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:11:20,847 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:11:20,847 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T21:11:20,847 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22a22fa6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:11:20,848 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@76d04136{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:11:20,937 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@45272860{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/java.io.tmpdir/jetty-localhost-34985-hadoop-hdfs-3_4_1-tests_jar-_-any-14742197992143551613/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:11:20,937 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@613ebde1{HTTP/1.1, (http/1.1)}{localhost:34985} 2024-12-02T21:11:20,937 INFO [Time-limited test {}] server.Server(415): Started @292317ms 2024-12-02T21:11:20,938 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:11:20,966 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:11:20,968 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:11:20,969 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:11:20,969 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:11:20,969 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T21:11:20,969 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6bdeed05{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:11:20,970 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e53ee54{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:11:21,061 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7f3995f0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/java.io.tmpdir/jetty-localhost-45707-hadoop-hdfs-3_4_1-tests_jar-_-any-14958253189539176690/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:11:21,061 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2eb7c5c4{HTTP/1.1, (http/1.1)}{localhost:45707} 2024-12-02T21:11:21,061 INFO [Time-limited test {}] server.Server(415): Started @292441ms 2024-12-02T21:11:21,062 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:11:21,427 INFO [regionserver/7d4f3b9a7081:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T21:11:21,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:21,555 WARN [Thread-1721 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/cluster_fa39a46f-f7b4-88f9-3eda-0cc056d788d5/dfs/data/data1/current/BP-1860221667-172.17.0.2-1733173880307/current, will proceed with Du for space computation calculation, 2024-12-02T21:11:21,555 WARN [Thread-1722 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/cluster_fa39a46f-f7b4-88f9-3eda-0cc056d788d5/dfs/data/data2/current/BP-1860221667-172.17.0.2-1733173880307/current, will proceed with Du for space computation calculation, 2024-12-02T21:11:21,576 WARN [Thread-1685 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T21:11:21,578 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2559c3588fe6d4c3 with lease ID 0x9c469015604ed45e: Processing first storage report for DS-aef9d387-cdd6-406a-8075-4e7709e5da4a from datanode DatanodeRegistration(127.0.0.1:46855, datanodeUuid=b3e2690e-061b-4518-862a-5786927a1c7e, infoPort=42237, infoSecurePort=0, ipcPort=41437, storageInfo=lv=-57;cid=testClusterID;nsid=201610125;c=1733173880307) 2024-12-02T21:11:21,578 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2559c3588fe6d4c3 with lease ID 0x9c469015604ed45e: from storage DS-aef9d387-cdd6-406a-8075-4e7709e5da4a node DatanodeRegistration(127.0.0.1:46855, datanodeUuid=b3e2690e-061b-4518-862a-5786927a1c7e, infoPort=42237, infoSecurePort=0, ipcPort=41437, storageInfo=lv=-57;cid=testClusterID;nsid=201610125;c=1733173880307), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:11:21,578 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2559c3588fe6d4c3 with lease ID 0x9c469015604ed45e: Processing first storage report for DS-f2a01f85-b2b2-4adc-9e1a-efe8353559a1 from datanode DatanodeRegistration(127.0.0.1:46855, datanodeUuid=b3e2690e-061b-4518-862a-5786927a1c7e, infoPort=42237, infoSecurePort=0, ipcPort=41437, storageInfo=lv=-57;cid=testClusterID;nsid=201610125;c=1733173880307) 2024-12-02T21:11:21,578 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2559c3588fe6d4c3 with lease ID 0x9c469015604ed45e: from storage DS-f2a01f85-b2b2-4adc-9e1a-efe8353559a1 node DatanodeRegistration(127.0.0.1:46855, datanodeUuid=b3e2690e-061b-4518-862a-5786927a1c7e, infoPort=42237, infoSecurePort=0, ipcPort=41437, storageInfo=lv=-57;cid=testClusterID;nsid=201610125;c=1733173880307), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:11:21,644 WARN [Thread-1732 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/cluster_fa39a46f-f7b4-88f9-3eda-0cc056d788d5/dfs/data/data3/current/BP-1860221667-172.17.0.2-1733173880307/current, will proceed with Du for space computation calculation, 2024-12-02T21:11:21,644 WARN [Thread-1733 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/cluster_fa39a46f-f7b4-88f9-3eda-0cc056d788d5/dfs/data/data4/current/BP-1860221667-172.17.0.2-1733173880307/current, will proceed with Du for space computation calculation, 2024-12-02T21:11:21,667 WARN [Thread-1708 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T21:11:21,669 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc7fb121067f202a0 with lease ID 0x9c469015604ed45f: Processing first storage report for DS-fe072dbb-281f-4bb0-8529-a723dc3a7063 from datanode DatanodeRegistration(127.0.0.1:32945, datanodeUuid=ddbdc62c-1399-4729-ab2a-c797860c0e62, infoPort=33209, infoSecurePort=0, ipcPort=40821, storageInfo=lv=-57;cid=testClusterID;nsid=201610125;c=1733173880307) 2024-12-02T21:11:21,670 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc7fb121067f202a0 with lease ID 0x9c469015604ed45f: from storage DS-fe072dbb-281f-4bb0-8529-a723dc3a7063 node DatanodeRegistration(127.0.0.1:32945, datanodeUuid=ddbdc62c-1399-4729-ab2a-c797860c0e62, infoPort=33209, infoSecurePort=0, ipcPort=40821, storageInfo=lv=-57;cid=testClusterID;nsid=201610125;c=1733173880307), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:11:21,670 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc7fb121067f202a0 with lease ID 0x9c469015604ed45f: Processing first storage report for DS-dbc99f4a-5889-496e-b123-9305968ba9ff from datanode DatanodeRegistration(127.0.0.1:32945, datanodeUuid=ddbdc62c-1399-4729-ab2a-c797860c0e62, infoPort=33209, infoSecurePort=0, ipcPort=40821, storageInfo=lv=-57;cid=testClusterID;nsid=201610125;c=1733173880307) 2024-12-02T21:11:21,670 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc7fb121067f202a0 with lease ID 0x9c469015604ed45f: from storage DS-dbc99f4a-5889-496e-b123-9305968ba9ff node DatanodeRegistration(127.0.0.1:32945, datanodeUuid=ddbdc62c-1399-4729-ab2a-c797860c0e62, infoPort=33209, infoSecurePort=0, ipcPort=40821, storageInfo=lv=-57;cid=testClusterID;nsid=201610125;c=1733173880307), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:11:21,685 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4 2024-12-02T21:11:21,688 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/cluster_fa39a46f-f7b4-88f9-3eda-0cc056d788d5/zookeeper_0, clientPort=59514, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/cluster_fa39a46f-f7b4-88f9-3eda-0cc056d788d5/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/cluster_fa39a46f-f7b4-88f9-3eda-0cc056d788d5/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-02T21:11:21,689 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=59514 2024-12-02T21:11:21,689 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:11:21,690 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:11:21,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741825_1001 (size=7) 2024-12-02T21:11:21,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741825_1001 (size=7) 2024-12-02T21:11:21,700 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30 with version=8 2024-12-02T21:11:21,700 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/hbase-staging 2024-12-02T21:11:21,702 INFO [Time-limited test {}] client.ConnectionUtils(129): master/7d4f3b9a7081:0 server-side Connection retries=45 2024-12-02T21:11:21,702 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:11:21,702 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T21:11:21,702 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T21:11:21,702 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:11:21,702 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T21:11:21,702 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T21:11:21,702 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T21:11:21,703 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:44741 2024-12-02T21:11:21,704 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:11:21,705 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:11:21,707 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:44741 connecting to ZooKeeper ensemble=127.0.0.1:59514 2024-12-02T21:11:21,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:447410x0, 
quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T21:11:21,761 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44741-0x101992c4dcc0000 connected 2024-12-02T21:11:21,824 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T21:11:21,825 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:11:21,826 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T21:11:21,827 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44741 2024-12-02T21:11:21,827 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44741 2024-12-02T21:11:21,827 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44741 2024-12-02T21:11:21,828 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44741 2024-12-02T21:11:21,828 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44741 2024-12-02T21:11:21,828 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30, hbase.cluster.distributed=false 2024-12-02T21:11:21,847 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/7d4f3b9a7081:0 server-side Connection retries=45 2024-12-02T21:11:21,847 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:11:21,847 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T21:11:21,847 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T21:11:21,847 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:11:21,847 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T21:11:21,847 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T21:11:21,848 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 
2024-12-02T21:11:21,848 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:45421 2024-12-02T21:11:21,848 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T21:11:21,849 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T21:11:21,849 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:11:21,851 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:11:21,853 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:45421 connecting to ZooKeeper ensemble=127.0.0.1:59514 2024-12-02T21:11:21,861 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:454210x0, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T21:11:21,862 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45421-0x101992c4dcc0001 connected 2024-12-02T21:11:21,862 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45421-0x101992c4dcc0001, quorum=127.0.0.1:59514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T21:11:21,863 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45421-0x101992c4dcc0001, quorum=127.0.0.1:59514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:11:21,863 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45421-0x101992c4dcc0001, quorum=127.0.0.1:59514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T21:11:21,863 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45421 2024-12-02T21:11:21,864 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45421 2024-12-02T21:11:21,864 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45421 2024-12-02T21:11:21,864 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45421 2024-12-02T21:11:21,864 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45421 2024-12-02T21:11:21,865 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/7d4f3b9a7081,44741,1733173881701 2024-12-02T21:11:21,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:11:21,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45421-0x101992c4dcc0001, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-12-02T21:11:21,873 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7d4f3b9a7081,44741,1733173881701 2024-12-02T21:11:21,877 DEBUG [M:0;7d4f3b9a7081:44741 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7d4f3b9a7081:44741 2024-12-02T21:11:21,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T21:11:21,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45421-0x101992c4dcc0001, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T21:11:21,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:11:21,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45421-0x101992c4dcc0001, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:11:21,882 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T21:11:21,882 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7d4f3b9a7081,44741,1733173881701 from backup master directory 2024-12-02T21:11:21,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7d4f3b9a7081,44741,1733173881701 2024-12-02T21:11:21,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45421-0x101992c4dcc0001, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:11:21,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:11:21,890 WARN [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-02T21:11:21,890 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7d4f3b9a7081,44741,1733173881701 2024-12-02T21:11:21,890 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T21:11:21,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741826_1002 (size=42) 2024-12-02T21:11:21,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741826_1002 (size=42) 2024-12-02T21:11:21,900 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/hbase.id with ID: f44344eb-ac9a-413f-b303-5271f06acce6 2024-12-02T21:11:21,908 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:11:21,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:11:21,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45421-0x101992c4dcc0001, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:11:21,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741827_1003 (size=196) 2024-12-02T21:11:21,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741827_1003 (size=196) 2024-12-02T21:11:21,928 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T21:11:21,929 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-02T21:11:21,929 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:11:21,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741828_1004 (size=1189) 2024-12-02T21:11:21,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741828_1004 (size=1189) 2024-12-02T21:11:21,939 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/data/master/store 2024-12-02T21:11:21,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741829_1005 (size=34) 2024-12-02T21:11:21,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741829_1005 (size=34) 2024-12-02T21:11:21,950 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:11:21,951 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T21:11:21,951 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-02T21:11:21,951 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:11:21,951 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T21:11:21,951 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:11:21,951 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:11:21,951 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-02T21:11:21,952 WARN [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/data/master/store/.initializing 2024-12-02T21:11:21,952 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/WALs/7d4f3b9a7081,44741,1733173881701 2024-12-02T21:11:21,955 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7d4f3b9a7081%2C44741%2C1733173881701, suffix=, logDir=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/WALs/7d4f3b9a7081,44741,1733173881701, archiveDir=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/oldWALs, maxLogs=10 2024-12-02T21:11:21,955 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C44741%2C1733173881701.1733173881955 2024-12-02T21:11:21,960 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/WALs/7d4f3b9a7081,44741,1733173881701/7d4f3b9a7081%2C44741%2C1733173881701.1733173881955 2024-12-02T21:11:21,960 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33209:33209),(127.0.0.1/127.0.0.1:42237:42237)] 2024-12-02T21:11:21,960 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:11:21,960 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:11:21,960 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:11:21,960 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:11:21,961 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:11:21,962 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-02T21:11:21,962 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:11:21,963 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:11:21,963 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:11:21,964 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-02T21:11:21,964 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:11:21,964 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:11:21,965 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:11:21,966 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-02T21:11:21,966 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:11:21,966 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:11:21,967 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:11:21,968 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-02T21:11:21,968 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:11:21,968 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:11:21,969 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:11:21,970 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:11:21,971 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T21:11:21,972 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:11:21,975 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:11:21,975 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=788471, jitterRate=0.0025937706232070923}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T21:11:21,976 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-02T21:11:21,977 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-02T21:11:21,980 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4043abec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:11:21,980 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-02T21:11:21,981 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-02T21:11:21,981 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-02T21:11:21,981 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-02T21:11:21,981 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec 2024-12-02T21:11:21,981 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-12-02T21:11:21,981 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-02T21:11:21,983 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-02T21:11:21,984 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-02T21:11:21,989 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-02T21:11:21,990 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-02T21:11:21,990 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-02T21:11:21,998 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-02T21:11:21,998 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-02T21:11:21,999 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-02T21:11:22,006 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-02T21:11:22,007 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-02T21:11:22,015 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-02T21:11:22,016 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-02T21:11:22,023 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-02T21:11:22,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T21:11:22,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45421-0x101992c4dcc0001, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T21:11:22,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:11:22,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45421-0x101992c4dcc0001, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-02T21:11:22,032 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=7d4f3b9a7081,44741,1733173881701, sessionid=0x101992c4dcc0000, setting cluster-up flag (Was=false) 2024-12-02T21:11:22,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45421-0x101992c4dcc0001, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:11:22,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:11:22,073 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-02T21:11:22,075 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7d4f3b9a7081,44741,1733173881701 2024-12-02T21:11:22,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:11:22,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45421-0x101992c4dcc0001, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:11:22,120 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-02T21:11:22,121 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7d4f3b9a7081,44741,1733173881701 2024-12-02T21:11:22,123 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-02T21:11:22,123 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-02T21:11:22,124 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-02T21:11:22,124 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7d4f3b9a7081,44741,1733173881701 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-02T21:11:22,124 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7d4f3b9a7081:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:11:22,124 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7d4f3b9a7081:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:11:22,124 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:11:22,124 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:11:22,124 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7d4f3b9a7081:0, corePoolSize=10, maxPoolSize=10 2024-12-02T21:11:22,124 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:11:22,124 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=2, maxPoolSize=2 2024-12-02T21:11:22,125 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:11:22,126 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733173912126 2024-12-02T21:11:22,126 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:11:22,126 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-02T21:11:22,126 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-02T21:11:22,126 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-02T21:11:22,126 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-02T21:11:22,126 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-02T21:11:22,126 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-02T21:11:22,126 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-02T21:11:22,127 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T21:11:22,127 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-02T21:11:22,127 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-02T21:11:22,127 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-02T21:11:22,127 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:11:22,127 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T21:11:22,128 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-02T21:11:22,128 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-02T21:11:22,128 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.large.0-1733173882128,5,FailOnTimeoutGroup] 2024-12-02T21:11:22,130 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.small.0-1733173882128,5,FailOnTimeoutGroup] 2024-12-02T21:11:22,131 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-02T21:11:22,131 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-02T21:11:22,131 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-02T21:11:22,131 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-02T21:11:22,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741831_1007 (size=1039) 2024-12-02T21:11:22,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741831_1007 (size=1039) 2024-12-02T21:11:22,136 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-02T21:11:22,137 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30 2024-12-02T21:11:22,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741832_1008 (size=32) 2024-12-02T21:11:22,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741832_1008 (size=32) 2024-12-02T21:11:22,146 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:11:22,147 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T21:11:22,149 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T21:11:22,149 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:11:22,149 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:11:22,149 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T21:11:22,151 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T21:11:22,151 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:11:22,151 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:11:22,151 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T21:11:22,152 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality 
to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T21:11:22,152 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:11:22,153 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:11:22,153 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/meta/1588230740 2024-12-02T21:11:22,154 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/meta/1588230740 2024-12-02T21:11:22,155 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T21:11:22,156 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-02T21:11:22,157 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:11:22,158 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=832444, jitterRate=0.058507487177848816}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T21:11:22,158 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-02T21:11:22,158 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-02T21:11:22,158 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-02T21:11:22,158 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-02T21:11:22,158 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T21:11:22,158 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T21:11:22,158 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-02T21:11:22,158 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-02T21:11:22,159 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:11:22,159 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-02T21:11:22,159 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-02T21:11:22,160 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T21:11:22,160 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-02T21:11:22,178 DEBUG [RS:0;7d4f3b9a7081:45421 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7d4f3b9a7081:45421 2024-12-02T21:11:22,179 INFO [RS:0;7d4f3b9a7081:45421 {}] regionserver.HRegionServer(1008): ClusterId : f44344eb-ac9a-413f-b303-5271f06acce6 2024-12-02T21:11:22,179 DEBUG [RS:0;7d4f3b9a7081:45421 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T21:11:22,195 DEBUG [RS:0;7d4f3b9a7081:45421 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T21:11:22,195 DEBUG [RS:0;7d4f3b9a7081:45421 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T21:11:22,213 DEBUG [RS:0;7d4f3b9a7081:45421 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T21:11:22,213 DEBUG [RS:0;7d4f3b9a7081:45421 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6aea762c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:11:22,214 DEBUG [RS:0;7d4f3b9a7081:45421 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a11d868, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7d4f3b9a7081/172.17.0.2:0 2024-12-02T21:11:22,214 INFO [RS:0;7d4f3b9a7081:45421 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-02T21:11:22,214 INFO [RS:0;7d4f3b9a7081:45421 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-02T21:11:22,214 DEBUG [RS:0;7d4f3b9a7081:45421 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-02T21:11:22,215 INFO [RS:0;7d4f3b9a7081:45421 {}] regionserver.HRegionServer(3073): reportForDuty to master=7d4f3b9a7081,44741,1733173881701 with isa=7d4f3b9a7081/172.17.0.2:45421, startcode=1733173881847 2024-12-02T21:11:22,215 DEBUG [RS:0;7d4f3b9a7081:45421 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T21:11:22,218 INFO [RS-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44461, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T21:11:22,218 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44741 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 7d4f3b9a7081,45421,1733173881847 2024-12-02T21:11:22,219 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44741 {}] master.ServerManager(486): Registering regionserver=7d4f3b9a7081,45421,1733173881847 2024-12-02T21:11:22,221 DEBUG [RS:0;7d4f3b9a7081:45421 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30 2024-12-02T21:11:22,221 DEBUG [RS:0;7d4f3b9a7081:45421 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:38991 2024-12-02T21:11:22,221 DEBUG [RS:0;7d4f3b9a7081:45421 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-02T21:11:22,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T21:11:22,237 DEBUG [RS:0;7d4f3b9a7081:45421 {}] zookeeper.ZKUtil(111): regionserver:45421-0x101992c4dcc0001, quorum=127.0.0.1:59514, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7d4f3b9a7081,45421,1733173881847 2024-12-02T21:11:22,237 WARN [RS:0;7d4f3b9a7081:45421 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-02T21:11:22,237 INFO [RS:0;7d4f3b9a7081:45421 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:11:22,237 DEBUG [RS:0;7d4f3b9a7081:45421 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/WALs/7d4f3b9a7081,45421,1733173881847 2024-12-02T21:11:22,237 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7d4f3b9a7081,45421,1733173881847] 2024-12-02T21:11:22,241 DEBUG [RS:0;7d4f3b9a7081:45421 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-02T21:11:22,241 INFO [RS:0;7d4f3b9a7081:45421 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T21:11:22,243 INFO [RS:0;7d4f3b9a7081:45421 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T21:11:22,244 INFO [RS:0;7d4f3b9a7081:45421 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T21:11:22,244 INFO [RS:0;7d4f3b9a7081:45421 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:11:22,244 INFO [RS:0;7d4f3b9a7081:45421 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-02T21:11:22,245 INFO [RS:0;7d4f3b9a7081:45421 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-02T21:11:22,246 DEBUG [RS:0;7d4f3b9a7081:45421 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:11:22,246 DEBUG [RS:0;7d4f3b9a7081:45421 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:11:22,246 DEBUG [RS:0;7d4f3b9a7081:45421 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:11:22,246 DEBUG [RS:0;7d4f3b9a7081:45421 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:11:22,246 DEBUG [RS:0;7d4f3b9a7081:45421 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:11:22,246 DEBUG [RS:0;7d4f3b9a7081:45421 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7d4f3b9a7081:0, corePoolSize=2, maxPoolSize=2 2024-12-02T21:11:22,246 DEBUG [RS:0;7d4f3b9a7081:45421 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:11:22,246 DEBUG [RS:0;7d4f3b9a7081:45421 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:11:22,246 DEBUG [RS:0;7d4f3b9a7081:45421 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:11:22,246 DEBUG [RS:0;7d4f3b9a7081:45421 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:11:22,247 DEBUG [RS:0;7d4f3b9a7081:45421 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:11:22,247 DEBUG [RS:0;7d4f3b9a7081:45421 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7d4f3b9a7081:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:11:22,247 DEBUG [RS:0;7d4f3b9a7081:45421 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:11:22,247 INFO [RS:0;7d4f3b9a7081:45421 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T21:11:22,247 INFO [RS:0;7d4f3b9a7081:45421 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T21:11:22,247 INFO [RS:0;7d4f3b9a7081:45421 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T21:11:22,247 INFO [RS:0;7d4f3b9a7081:45421 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T21:11:22,247 INFO [RS:0;7d4f3b9a7081:45421 {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,45421,1733173881847-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-02T21:11:22,260 INFO [RS:0;7d4f3b9a7081:45421 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T21:11:22,261 INFO [RS:0;7d4f3b9a7081:45421 {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,45421,1733173881847-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:11:22,273 INFO [RS:0;7d4f3b9a7081:45421 {}] regionserver.Replication(204): 7d4f3b9a7081,45421,1733173881847 started 2024-12-02T21:11:22,273 INFO [RS:0;7d4f3b9a7081:45421 {}] regionserver.HRegionServer(1767): Serving as 7d4f3b9a7081,45421,1733173881847, RpcServer on 7d4f3b9a7081/172.17.0.2:45421, sessionid=0x101992c4dcc0001 2024-12-02T21:11:22,273 DEBUG [RS:0;7d4f3b9a7081:45421 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T21:11:22,273 DEBUG [RS:0;7d4f3b9a7081:45421 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7d4f3b9a7081,45421,1733173881847 2024-12-02T21:11:22,273 DEBUG [RS:0;7d4f3b9a7081:45421 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7d4f3b9a7081,45421,1733173881847' 2024-12-02T21:11:22,273 DEBUG [RS:0;7d4f3b9a7081:45421 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T21:11:22,274 DEBUG [RS:0;7d4f3b9a7081:45421 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T21:11:22,274 DEBUG [RS:0;7d4f3b9a7081:45421 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T21:11:22,274 DEBUG [RS:0;7d4f3b9a7081:45421 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T21:11:22,274 DEBUG [RS:0;7d4f3b9a7081:45421 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7d4f3b9a7081,45421,1733173881847 2024-12-02T21:11:22,274 DEBUG [RS:0;7d4f3b9a7081:45421 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7d4f3b9a7081,45421,1733173881847' 2024-12-02T21:11:22,274 DEBUG [RS:0;7d4f3b9a7081:45421 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T21:11:22,275 DEBUG [RS:0;7d4f3b9a7081:45421 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T21:11:22,275 DEBUG [RS:0;7d4f3b9a7081:45421 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T21:11:22,275 INFO [RS:0;7d4f3b9a7081:45421 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T21:11:22,275 INFO [RS:0;7d4f3b9a7081:45421 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-02T21:11:22,311 WARN [7d4f3b9a7081:44741 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
2024-12-02T21:11:22,378 INFO [RS:0;7d4f3b9a7081:45421 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7d4f3b9a7081%2C45421%2C1733173881847, suffix=, logDir=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/WALs/7d4f3b9a7081,45421,1733173881847, archiveDir=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/oldWALs, maxLogs=32 2024-12-02T21:11:22,379 INFO [RS:0;7d4f3b9a7081:45421 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C45421%2C1733173881847.1733173882379 2024-12-02T21:11:22,387 INFO [RS:0;7d4f3b9a7081:45421 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/WALs/7d4f3b9a7081,45421,1733173881847/7d4f3b9a7081%2C45421%2C1733173881847.1733173882379 2024-12-02T21:11:22,388 DEBUG [RS:0;7d4f3b9a7081:45421 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33209:33209),(127.0.0.1/127.0.0.1:42237:42237)] 2024-12-02T21:11:22,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:11:22,561 DEBUG [7d4f3b9a7081:44741 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-02T21:11:22,561 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7d4f3b9a7081,45421,1733173881847 2024-12-02T21:11:22,562 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7d4f3b9a7081,45421,1733173881847, state=OPENING 2024-12-02T21:11:22,573 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-02T21:11:22,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:11:22,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45421-0x101992c4dcc0001, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:11:22,582 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=7d4f3b9a7081,45421,1733173881847}] 2024-12-02T21:11:22,583 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:11:22,583 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:11:22,736 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7d4f3b9a7081,45421,1733173881847 2024-12-02T21:11:22,736 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T21:11:22,741 INFO [RS-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34164, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T21:11:22,745 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-02T21:11:22,745 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:11:22,748 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7d4f3b9a7081%2C45421%2C1733173881847.meta, suffix=.meta, logDir=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/WALs/7d4f3b9a7081,45421,1733173881847, archiveDir=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/oldWALs, maxLogs=32 2024-12-02T21:11:22,748 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C45421%2C1733173881847.meta.1733173882748.meta 2024-12-02T21:11:22,758 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL 
/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/WALs/7d4f3b9a7081,45421,1733173881847/7d4f3b9a7081%2C45421%2C1733173881847.meta.1733173882748.meta 2024-12-02T21:11:22,758 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33209:33209),(127.0.0.1/127.0.0.1:42237:42237)] 2024-12-02T21:11:22,758 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:11:22,758 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-02T21:11:22,758 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-02T21:11:22,758 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-02T21:11:22,758 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-02T21:11:22,758 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:11:22,758 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-02T21:11:22,758 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-02T21:11:22,759 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T21:11:22,760 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T21:11:22,760 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-02T21:11:22,761 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:11:22,761 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T21:11:22,761 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T21:11:22,761 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:11:22,762 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:11:22,762 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T21:11:22,762 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T21:11:22,762 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:11:22,763 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:11:22,763 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) 
under hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/meta/1588230740 2024-12-02T21:11:22,764 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/meta/1588230740 2024-12-02T21:11:22,765 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T21:11:22,766 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-02T21:11:22,767 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=860646, jitterRate=0.09436915814876556}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T21:11:22,767 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-02T21:11:22,768 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733173882735 2024-12-02T21:11:22,769 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-02T21:11:22,770 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-02T21:11:22,770 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7d4f3b9a7081,45421,1733173881847 2024-12-02T21:11:22,771 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7d4f3b9a7081,45421,1733173881847, state=OPEN 2024-12-02T21:11:22,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45421-0x101992c4dcc0001, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T21:11:22,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T21:11:22,803 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:11:22,803 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:11:22,806 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-02T21:11:22,806 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, 
state=SUCCESS; OpenRegionProcedure 1588230740, server=7d4f3b9a7081,45421,1733173881847 in 221 msec 2024-12-02T21:11:22,808 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-02T21:11:22,808 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 647 msec 2024-12-02T21:11:22,811 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 687 msec 2024-12-02T21:11:22,811 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733173882811, completionTime=-1 2024-12-02T21:11:22,812 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-02T21:11:22,812 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-02T21:11:22,813 DEBUG [hconnection-0x640d9735-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T21:11:22,815 INFO [RS-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34176, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T21:11:22,816 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-02T21:11:22,816 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733173942816 2024-12-02T21:11:22,816 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733174002816 2024-12-02T21:11:22,816 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 4 msec 2024-12-02T21:11:22,840 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,44741,1733173881701-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:11:22,840 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,44741,1733173881701-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:11:22,841 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,44741,1733173881701-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:11:22,841 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7d4f3b9a7081:44741, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:11:22,841 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-02T21:11:22,841 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. 
Creating... 2024-12-02T21:11:22,841 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T21:11:22,843 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-02T21:11:22,844 DEBUG [master/7d4f3b9a7081:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-02T21:11:22,845 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T21:11:22,845 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:11:22,847 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T21:11:22,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741835_1011 (size=358) 2024-12-02T21:11:22,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741835_1011 (size=358) 2024-12-02T21:11:22,860 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 5dda45032950b51afffe60c272c5fabd, NAME => 'hbase:namespace,,1733173882841.5dda45032950b51afffe60c272c5fabd.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30 2024-12-02T21:11:22,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741836_1012 (size=42) 2024-12-02T21:11:22,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741836_1012 (size=42) 2024-12-02T21:11:22,871 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733173882841.5dda45032950b51afffe60c272c5fabd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:11:22,871 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 5dda45032950b51afffe60c272c5fabd, disabling compactions & flushes 2024-12-02T21:11:22,871 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region 
hbase:namespace,,1733173882841.5dda45032950b51afffe60c272c5fabd. 2024-12-02T21:11:22,871 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733173882841.5dda45032950b51afffe60c272c5fabd. 2024-12-02T21:11:22,871 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733173882841.5dda45032950b51afffe60c272c5fabd. after waiting 0 ms 2024-12-02T21:11:22,871 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733173882841.5dda45032950b51afffe60c272c5fabd. 2024-12-02T21:11:22,872 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733173882841.5dda45032950b51afffe60c272c5fabd. 2024-12-02T21:11:22,872 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 5dda45032950b51afffe60c272c5fabd: 2024-12-02T21:11:22,873 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T21:11:22,873 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733173882841.5dda45032950b51afffe60c272c5fabd.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733173882873"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733173882873"}]},"ts":"1733173882873"} 2024-12-02T21:11:22,875 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-02T21:11:22,876 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T21:11:22,876 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733173882876"}]},"ts":"1733173882876"} 2024-12-02T21:11:22,877 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-02T21:11:22,895 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=5dda45032950b51afffe60c272c5fabd, ASSIGN}] 2024-12-02T21:11:22,896 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=5dda45032950b51afffe60c272c5fabd, ASSIGN 2024-12-02T21:11:22,897 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=5dda45032950b51afffe60c272c5fabd, ASSIGN; state=OFFLINE, location=7d4f3b9a7081,45421,1733173881847; forceNewPlan=false, retain=false 2024-12-02T21:11:23,048 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=5dda45032950b51afffe60c272c5fabd, regionState=OPENING, regionLocation=7d4f3b9a7081,45421,1733173881847 2024-12-02T21:11:23,052 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): 
Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 5dda45032950b51afffe60c272c5fabd, server=7d4f3b9a7081,45421,1733173881847}] 2024-12-02T21:11:23,206 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7d4f3b9a7081,45421,1733173881847 2024-12-02T21:11:23,211 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733173882841.5dda45032950b51afffe60c272c5fabd. 2024-12-02T21:11:23,211 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 5dda45032950b51afffe60c272c5fabd, NAME => 'hbase:namespace,,1733173882841.5dda45032950b51afffe60c272c5fabd.', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:11:23,211 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 5dda45032950b51afffe60c272c5fabd 2024-12-02T21:11:23,211 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733173882841.5dda45032950b51afffe60c272c5fabd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:11:23,212 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 5dda45032950b51afffe60c272c5fabd 2024-12-02T21:11:23,212 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 5dda45032950b51afffe60c272c5fabd 2024-12-02T21:11:23,213 INFO [StoreOpener-5dda45032950b51afffe60c272c5fabd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 5dda45032950b51afffe60c272c5fabd 2024-12-02T21:11:23,215 INFO [StoreOpener-5dda45032950b51afffe60c272c5fabd-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5dda45032950b51afffe60c272c5fabd columnFamilyName info 2024-12-02T21:11:23,215 DEBUG [StoreOpener-5dda45032950b51afffe60c272c5fabd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:11:23,216 INFO [StoreOpener-5dda45032950b51afffe60c272c5fabd-1 {}] regionserver.HStore(327): Store=5dda45032950b51afffe60c272c5fabd/info, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:11:23,217 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/namespace/5dda45032950b51afffe60c272c5fabd 2024-12-02T21:11:23,217 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/namespace/5dda45032950b51afffe60c272c5fabd 2024-12-02T21:11:23,219 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 5dda45032950b51afffe60c272c5fabd 2024-12-02T21:11:23,221 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/namespace/5dda45032950b51afffe60c272c5fabd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:11:23,222 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 5dda45032950b51afffe60c272c5fabd; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=696138, jitterRate=-0.11481495201587677}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T21:11:23,222 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 5dda45032950b51afffe60c272c5fabd: 2024-12-02T21:11:23,223 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733173882841.5dda45032950b51afffe60c272c5fabd., pid=6, masterSystemTime=1733173883206 2024-12-02T21:11:23,224 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733173882841.5dda45032950b51afffe60c272c5fabd. 2024-12-02T21:11:23,224 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733173882841.5dda45032950b51afffe60c272c5fabd. 
2024-12-02T21:11:23,225 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=5dda45032950b51afffe60c272c5fabd, regionState=OPEN, openSeqNum=2, regionLocation=7d4f3b9a7081,45421,1733173881847 2024-12-02T21:11:23,228 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-02T21:11:23,228 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 5dda45032950b51afffe60c272c5fabd, server=7d4f3b9a7081,45421,1733173881847 in 174 msec 2024-12-02T21:11:23,229 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-02T21:11:23,229 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=5dda45032950b51afffe60c272c5fabd, ASSIGN in 333 msec 2024-12-02T21:11:23,230 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T21:11:23,230 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733173883230"}]},"ts":"1733173883230"} 2024-12-02T21:11:23,231 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-02T21:11:23,240 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T21:11:23,242 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 400 msec 2024-12-02T21:11:23,244 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-02T21:11:23,312 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45421-0x101992c4dcc0001, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:11:23,312 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-02T21:11:23,312 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:11:23,319 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-02T21:11:23,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-02T21:11:23,342 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 23 msec 2024-12-02T21:11:23,352 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-02T21:11:23,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-02T21:11:23,380 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 28 msec 2024-12-02T21:11:23,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-02T21:11:23,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-02T21:11:23,415 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.525sec 2024-12-02T21:11:23,415 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-02T21:11:23,415 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-02T21:11:23,415 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-02T21:11:23,415 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-02T21:11:23,415 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-02T21:11:23,415 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,44741,1733173881701-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T21:11:23,415 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,44741,1733173881701-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-02T21:11:23,417 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-02T21:11:23,417 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-02T21:11:23,418 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,44741,1733173881701-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
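At this point the master reports initialization complete and the default and hbase namespaces have been created. A minimal sketch of verifying that from a client, assuming a reachable cluster configuration; the class name is illustrative, while Admin.listNamespaceDescriptors() is the stock HBase 2.x client call:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListNamespaces {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // After the two CreateNamespaceProcedure runs above, this should print
      // at least "default" and "hbase".
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println(ns.getName());
      }
    }
  }
}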
2024-12-02T21:11:23,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:11:23,469 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x07e604e4 to 127.0.0.1:59514 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@139899c5 2024-12-02T21:11:23,482 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e11cc1f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:11:23,486 DEBUG [hconnection-0x2ef5dd08-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T21:11:23,489 INFO [RS-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34180, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T21:11:23,492 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=7d4f3b9a7081,44741,1733173881701 2024-12-02T21:11:23,492 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:11:23,495 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-02T21:11:23,496 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T21:11:23,498 INFO [RS-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55006, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T21:11:23,499 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44741 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-02T21:11:23,499 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44741 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
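The two TableDescriptorChecker warnings fire because the test deliberately sets MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes) far below production values so that splits and flushes happen quickly. A sketch of how such a descriptor could be built with the public TableDescriptorBuilder API, using the same 'info' family and the same deliberately small limits (the class name is illustrative, not part of the test):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class SmallLimitsDescriptor {
  public static void main(String[] args) {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(1)                 // VERSIONS => '1' in the create statement below
        .build();
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
        .setColumnFamily(info)
        .setMaxFileSize(786432L)           // the MAX_FILESIZE the checker flags as too small
        .setMemStoreFlushSize(8192L)       // the MEMSTORE_FLUSHSIZE it flags as too small
        .build();
    System.out.println(td);
  }
}

Passing such a descriptor to Admin.createTable would produce the same pair of warnings on the master, which is expected for a log-rolling test but would be a misconfiguration in production.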
2024-12-02T21:11:23,499 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44741 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T21:11:23,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44741 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-12-02T21:11:23,501 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T21:11:23,501 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:11:23,501 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44741 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 9 2024-12-02T21:11:23,502 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T21:11:23,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-02T21:11:23,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741837_1013 (size=381) 2024-12-02T21:11:23,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741837_1013 (size=381) 2024-12-02T21:11:23,511 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 9f99cceab3284d89ba5d313b73c9eac6, NAME => 'TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30 2024-12-02T21:11:23,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741838_1014 (size=64) 2024-12-02T21:11:23,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741838_1014 (size=64) 2024-12-02T21:11:23,519 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(894): Instantiated 
TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:11:23,519 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1681): Closing 9f99cceab3284d89ba5d313b73c9eac6, disabling compactions & flushes 2024-12-02T21:11:23,519 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6. 2024-12-02T21:11:23,519 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6. 2024-12-02T21:11:23,519 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6. after waiting 0 ms 2024-12-02T21:11:23,519 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6. 2024-12-02T21:11:23,519 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6. 2024-12-02T21:11:23,519 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1635): Region close journal for 9f99cceab3284d89ba5d313b73c9eac6: 2024-12-02T21:11:23,520 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T21:11:23,520 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733173883520"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733173883520"}]},"ts":"1733173883520"} 2024-12-02T21:11:23,522 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
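CreateTableProcedure has now initialized the region, closed it again, and written its info:regioninfo and info:state cells into hbase:meta. A rough sketch of inspecting those rows from a client by scanning hbase:meta with a row-prefix bound; the class name and the prefix handling are illustrative assumptions, not part of the test:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanMetaForTable {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      // Region rows for a table start with "<tableName>,"; the info:regioninfo and
      // info:state cells are the ones the Put above writes.
      Scan scan = new Scan().setRowPrefixFilter(Bytes.toBytes("TestLogRolling-testLogRolling,"));
      try (ResultScanner scanner = meta.getScanner(scan)) {
        for (Result r : scanner) {
          byte[] state = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"));
          System.out.println(Bytes.toStringBinary(r.getRow()) + " state=" + Bytes.toString(state));
        }
      }
    }
  }
}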
2024-12-02T21:11:23,523 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T21:11:23,523 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733173883523"}]},"ts":"1733173883523"} 2024-12-02T21:11:23,525 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-12-02T21:11:23,540 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9f99cceab3284d89ba5d313b73c9eac6, ASSIGN}] 2024-12-02T21:11:23,541 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9f99cceab3284d89ba5d313b73c9eac6, ASSIGN 2024-12-02T21:11:23,542 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9f99cceab3284d89ba5d313b73c9eac6, ASSIGN; state=OFFLINE, location=7d4f3b9a7081,45421,1733173881847; forceNewPlan=false, retain=false 2024-12-02T21:11:23,692 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=9f99cceab3284d89ba5d313b73c9eac6, regionState=OPENING, regionLocation=7d4f3b9a7081,45421,1733173881847 2024-12-02T21:11:23,695 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 9f99cceab3284d89ba5d313b73c9eac6, server=7d4f3b9a7081,45421,1733173881847}] 2024-12-02T21:11:23,848 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7d4f3b9a7081,45421,1733173881847 2024-12-02T21:11:23,851 INFO [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6. 
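The ASSIGN subprocedure picks 7d4f3b9a7081,45421 and dispatches an OpenRegionProcedure for the single region of the new table. Since an unsplit table has exactly one region with empty start and end keys (the {STARTKEY => '', ENDKEY => ''} above), a client could list it with Admin.getRegions; this sketch assumes a reachable cluster configuration and uses an illustrative class name:

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class ListTableRegions {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      List<RegionInfo> regions = admin.getRegions(TableName.valueOf("TestLogRolling-testLogRolling"));
      // A freshly created, unsplit table has a single region whose start and end
      // keys are both empty.
      for (RegionInfo ri : regions) {
        System.out.println(ri.getEncodedName()
            + " [" + Bytes.toStringBinary(ri.getStartKey())
            + ", " + Bytes.toStringBinary(ri.getEndKey()) + ")");
      }
    }
  }
}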
2024-12-02T21:11:23,852 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 9f99cceab3284d89ba5d313b73c9eac6, NAME => 'TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6.', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:11:23,852 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 9f99cceab3284d89ba5d313b73c9eac6 2024-12-02T21:11:23,852 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:11:23,852 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 9f99cceab3284d89ba5d313b73c9eac6 2024-12-02T21:11:23,852 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 9f99cceab3284d89ba5d313b73c9eac6 2024-12-02T21:11:23,854 INFO [StoreOpener-9f99cceab3284d89ba5d313b73c9eac6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 9f99cceab3284d89ba5d313b73c9eac6 2024-12-02T21:11:23,855 INFO [StoreOpener-9f99cceab3284d89ba5d313b73c9eac6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9f99cceab3284d89ba5d313b73c9eac6 columnFamilyName info 2024-12-02T21:11:23,855 DEBUG [StoreOpener-9f99cceab3284d89ba5d313b73c9eac6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:11:23,856 INFO [StoreOpener-9f99cceab3284d89ba5d313b73c9eac6-1 {}] regionserver.HStore(327): Store=9f99cceab3284d89ba5d313b73c9eac6/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:11:23,857 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6 2024-12-02T21:11:23,857 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered 
edits file(s) under hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6 2024-12-02T21:11:23,860 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 9f99cceab3284d89ba5d313b73c9eac6 2024-12-02T21:11:23,862 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:11:23,863 INFO [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 9f99cceab3284d89ba5d313b73c9eac6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=732616, jitterRate=-0.0684308409690857}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T21:11:23,864 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 9f99cceab3284d89ba5d313b73c9eac6: 2024-12-02T21:11:23,865 INFO [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6., pid=11, masterSystemTime=1733173883848 2024-12-02T21:11:23,866 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6. 2024-12-02T21:11:23,866 INFO [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6. 
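With the region open, the test starts writing rows (row0001 and onward further down), which quickly overruns the 8 KB memstore and eventually draws a RegionTooBusyException from the server. A hedged sketch of a comparable write loop with a simple bounded backoff; the class name, row count, value size, and retry policy are assumptions for illustration, not the test's actual code:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteRowsWithBackoff {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
      byte[] family = Bytes.toBytes("info");
      for (int i = 1; i <= 32; i++) {
        Put put = new Put(Bytes.toBytes(String.format("row%04d", i)))  // row0001, row0002, ...
            .addColumn(family, Bytes.toBytes(""), new byte[1024]);     // ~1 KB values fill the tiny memstore quickly
        boolean written = false;
        for (int attempt = 1; attempt <= 5 && !written; attempt++) {
          try {
            table.put(put);
            written = true;
          } catch (IOException e) {
            // With the small flush size configured above, writers can outrun flushes and
            // the server rejects puts with RegionTooBusyException ("Over memstore
            // limit=32.0 K", seen further down in this log); depending on client retry
            // settings it may arrive directly or wrapped, so back off and try again.
            Thread.sleep(200L * attempt);
          }
        }
      }
    }
  }
}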
2024-12-02T21:11:23,867 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=9f99cceab3284d89ba5d313b73c9eac6, regionState=OPEN, openSeqNum=2, regionLocation=7d4f3b9a7081,45421,1733173881847 2024-12-02T21:11:23,871 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-02T21:11:23,871 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 9f99cceab3284d89ba5d313b73c9eac6, server=7d4f3b9a7081,45421,1733173881847 in 174 msec 2024-12-02T21:11:23,874 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-02T21:11:23,874 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9f99cceab3284d89ba5d313b73c9eac6, ASSIGN in 331 msec 2024-12-02T21:11:23,874 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T21:11:23,875 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733173883874"}]},"ts":"1733173883874"} 2024-12-02T21:11:23,876 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-12-02T21:11:23,888 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T21:11:23,890 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testLogRolling in 389 msec 2024-12-02T21:11:24,008 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-02T21:11:24,008 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-02T21:11:24,010 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-02T21:11:24,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:24,590 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:24,590 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:24,590 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:24,591 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:24,591 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:24,591 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:24,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:24,608 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:24,608 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:24,608 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:24,608 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:24,608 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:24,611 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:24,611 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:24,611 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:24,612 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:25,115 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-02T21:11:25,116 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:25,116 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:25,117 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:25,117 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:25,117 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:25,117 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:25,135 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:25,135 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:25,135 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:25,135 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:25,135 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:25,136 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:25,138 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:25,138 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:25,138 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:25,140 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:25,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:11:26,463 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:27,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:28,242 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-02T21:11:28,243 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-02T21:11:28,245 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-12-02T21:11:28,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:11:29,466 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:30,466 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:31,467 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:32,467 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:33,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:33,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-02T21:11:33,505 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling, procId: 9 completed 2024-12-02T21:11:33,510 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testLogRolling 2024-12-02T21:11:33,510 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6. 2024-12-02T21:11:33,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45421 {}] regionserver.HRegion(8581): Flush requested on 9f99cceab3284d89ba5d313b73c9eac6 2024-12-02T21:11:33,524 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9f99cceab3284d89ba5d313b73c9eac6 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-02T21:11:33,542 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/.tmp/info/401f5a7054b54ed3b9f182af1a7f6c1d is 1080, key is row0001/info:/1733173893515/Put/seqid=0 2024-12-02T21:11:33,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741839_1015 (size=12509) 2024-12-02T21:11:33,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741839_1015 (size=12509) 2024-12-02T21:11:33,548 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/.tmp/info/401f5a7054b54ed3b9f182af1a7f6c1d 2024-12-02T21:11:33,553 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45421 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=9f99cceab3284d89ba5d313b73c9eac6, server=7d4f3b9a7081,45421,1733173881847 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T21:11:33,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45421 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:34180 deadline: 1733173903552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=9f99cceab3284d89ba5d313b73c9eac6, server=7d4f3b9a7081,45421,1733173881847 2024-12-02T21:11:33,555 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/.tmp/info/401f5a7054b54ed3b9f182af1a7f6c1d as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/401f5a7054b54ed3b9f182af1a7f6c1d 2024-12-02T21:11:33,559 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/401f5a7054b54ed3b9f182af1a7f6c1d, entries=7, sequenceid=11, filesize=12.2 K 2024-12-02T21:11:33,560 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 9f99cceab3284d89ba5d313b73c9eac6 in 36ms, sequenceid=11, compaction requested=false 2024-12-02T21:11:33,560 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9f99cceab3284d89ba5d313b73c9eac6: 2024-12-02T21:11:34,416 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=1, created chunk count=12, reused chunk count=39, reuseRatio=76.47% 2024-12-02T21:11:34,417 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-02T21:11:34,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:35,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:36,472 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:37,473 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:38,473 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:11:39,474 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:40,475 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:41,171 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,171 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,172 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,172 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,173 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,173 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,191 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,191 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,191 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,193 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,476 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:41,698 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-02T21:11:41,699 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,700 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,700 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,701 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,701 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,701 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,723 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,723 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,723 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:41,725 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:42,477 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:43,478 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:43,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45421 {}] regionserver.HRegion(8581): Flush requested on 9f99cceab3284d89ba5d313b73c9eac6 2024-12-02T21:11:43,627 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9f99cceab3284d89ba5d313b73c9eac6 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-02T21:11:43,634 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/.tmp/info/adf5ef9ccb7341f7bd5a6338e57b5d33 is 1080, key is row0008/info:/1733173893524/Put/seqid=0 2024-12-02T21:11:43,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741840_1016 (size=29761) 2024-12-02T21:11:43,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741840_1016 (size=29761) 2024-12-02T21:11:43,640 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/.tmp/info/adf5ef9ccb7341f7bd5a6338e57b5d33 2024-12-02T21:11:43,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/.tmp/info/adf5ef9ccb7341f7bd5a6338e57b5d33 as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/adf5ef9ccb7341f7bd5a6338e57b5d33 2024-12-02T21:11:43,652 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/adf5ef9ccb7341f7bd5a6338e57b5d33, entries=23, sequenceid=37, filesize=29.1 K 2024-12-02T21:11:43,653 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 9f99cceab3284d89ba5d313b73c9eac6 in 26ms, sequenceid=37, compaction requested=false 2024-12-02T21:11:43,653 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9f99cceab3284d89ba5d313b73c9eac6: 2024-12-02T21:11:43,653 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=41.3 K, sizeToCheck=16.0 K 2024-12-02T21:11:43,653 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:11:43,653 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/adf5ef9ccb7341f7bd5a6338e57b5d33 because midkey is the 
same as first or last row 2024-12-02T21:11:44,479 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:45,481 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:45,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45421 {}] regionserver.HRegion(8581): Flush requested on 9f99cceab3284d89ba5d313b73c9eac6 2024-12-02T21:11:45,638 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9f99cceab3284d89ba5d313b73c9eac6 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-02T21:11:45,643 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/.tmp/info/ee110b841d4f4711856bd9894db27ad2 is 1080, key is row0031/info:/1733173903628/Put/seqid=0 2024-12-02T21:11:45,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741841_1017 (size=12509) 2024-12-02T21:11:45,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741841_1017 (size=12509) 2024-12-02T21:11:45,654 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/.tmp/info/ee110b841d4f4711856bd9894db27ad2 2024-12-02T21:11:45,661 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/.tmp/info/ee110b841d4f4711856bd9894db27ad2 as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/ee110b841d4f4711856bd9894db27ad2 2024-12-02T21:11:45,666 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/ee110b841d4f4711856bd9894db27ad2, entries=7, sequenceid=47, filesize=12.2 K 2024-12-02T21:11:45,667 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=21.02 KB/21520 for 9f99cceab3284d89ba5d313b73c9eac6 in 29ms, sequenceid=47, compaction requested=true 2024-12-02T21:11:45,667 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
9f99cceab3284d89ba5d313b73c9eac6: 2024-12-02T21:11:45,667 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=53.5 K, sizeToCheck=16.0 K 2024-12-02T21:11:45,667 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:11:45,667 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/adf5ef9ccb7341f7bd5a6338e57b5d33 because midkey is the same as first or last row 2024-12-02T21:11:45,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45421 {}] regionserver.HRegion(8581): Flush requested on 9f99cceab3284d89ba5d313b73c9eac6 2024-12-02T21:11:45,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9f99cceab3284d89ba5d313b73c9eac6:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T21:11:45,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:11:45,668 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T21:11:45,668 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9f99cceab3284d89ba5d313b73c9eac6 1/1 column families, dataSize=22.07 KB heapSize=23.88 KB 2024-12-02T21:11:45,669 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T21:11:45,669 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HStore(1540): 9f99cceab3284d89ba5d313b73c9eac6/info is initiating minor compaction (all files) 2024-12-02T21:11:45,669 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9f99cceab3284d89ba5d313b73c9eac6/info in TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6. 
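The entries above trace the flush/compaction cycle on region 9f99cceab3284d89ba5d313b73c9eac6: each memstore flush writes a new HFile under .tmp and commits it into info/, and once three store files have accumulated the region server selects all of them for a minor compaction, while the split policy declines to split because the midkey equals the first or last row. The same flush and compaction requests can also be issued explicitly through the public Admin API. The following is only a minimal sketch, assuming an HBase 2.x client with a reachable cluster configured via hbase-site.xml on the classpath; the table name is taken from this log, while the class name FlushAndCompactExample is purely illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactExample {
  public static void main(String[] args) throws Exception {
    // Reads hbase-site.xml from the classpath (assumption: a reachable test cluster).
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Force the memstore to disk: the server-side effect is the
      // "Flushed memstore data size=..." / "Added ... entries=..." lines above.
      admin.flush(table);
      // Request a compaction of the accumulated store files; the region server
      // still applies its own selection policy, as in the compaction lines above.
      admin.compact(table);
    }
  }
}

Note that Admin.compact only enqueues a request; which files actually get merged is still decided server-side, as the ExploringCompactionPolicy lines in this log show.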
2024-12-02T21:11:45,669 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/401f5a7054b54ed3b9f182af1a7f6c1d, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/adf5ef9ccb7341f7bd5a6338e57b5d33, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/ee110b841d4f4711856bd9894db27ad2] into tmpdir=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/.tmp, totalSize=53.5 K 2024-12-02T21:11:45,670 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.Compactor(224): Compacting 401f5a7054b54ed3b9f182af1a7f6c1d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733173893515 2024-12-02T21:11:45,670 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.Compactor(224): Compacting adf5ef9ccb7341f7bd5a6338e57b5d33, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733173893524 2024-12-02T21:11:45,671 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee110b841d4f4711856bd9894db27ad2, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733173903628 2024-12-02T21:11:45,672 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/.tmp/info/ab3aefbbabdd45a49a2fc8a7189b4959 is 1080, key is row0038/info:/1733173905639/Put/seqid=0 2024-12-02T21:11:45,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741842_1018 (size=27607) 2024-12-02T21:11:45,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741842_1018 (size=27607) 2024-12-02T21:11:45,681 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.07 KB at sequenceid=71 (bloomFilter=true), to=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/.tmp/info/ab3aefbbabdd45a49a2fc8a7189b4959 2024-12-02T21:11:45,687 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9f99cceab3284d89ba5d313b73c9eac6#info#compaction#45 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T21:11:45,687 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/.tmp/info/852de29748204b5f88ce0fa139ec99f1 is 1080, key is row0001/info:/1733173893515/Put/seqid=0 2024-12-02T21:11:45,689 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/.tmp/info/ab3aefbbabdd45a49a2fc8a7189b4959 as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/ab3aefbbabdd45a49a2fc8a7189b4959 2024-12-02T21:11:45,695 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/ab3aefbbabdd45a49a2fc8a7189b4959, entries=21, sequenceid=71, filesize=27.0 K 2024-12-02T21:11:45,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741843_1019 (size=44978) 2024-12-02T21:11:45,696 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~22.07 KB/22596, heapSize ~23.86 KB/24432, currentSize=6.30 KB/6456 for 9f99cceab3284d89ba5d313b73c9eac6 in 28ms, sequenceid=71, compaction requested=false 2024-12-02T21:11:45,696 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9f99cceab3284d89ba5d313b73c9eac6: 2024-12-02T21:11:45,697 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=80.5 K, sizeToCheck=16.0 K 2024-12-02T21:11:45,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741843_1019 (size=44978) 2024-12-02T21:11:45,697 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:11:45,697 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/adf5ef9ccb7341f7bd5a6338e57b5d33 because midkey is the same as first or last row 2024-12-02T21:11:45,703 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/.tmp/info/852de29748204b5f88ce0fa139ec99f1 as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/852de29748204b5f88ce0fa139ec99f1 2024-12-02T21:11:45,709 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9f99cceab3284d89ba5d313b73c9eac6/info of 9f99cceab3284d89ba5d313b73c9eac6 into 852de29748204b5f88ce0fa139ec99f1(size=43.9 K), total size for store is 70.9 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T21:11:45,709 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9f99cceab3284d89ba5d313b73c9eac6: 2024-12-02T21:11:45,710 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6., storeName=9f99cceab3284d89ba5d313b73c9eac6/info, priority=13, startTime=1733173905667; duration=0sec 2024-12-02T21:11:45,710 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=70.9 K, sizeToCheck=16.0 K 2024-12-02T21:11:45,710 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:11:45,710 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/852de29748204b5f88ce0fa139ec99f1 because midkey is the same as first or last row 2024-12-02T21:11:45,710 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:11:45,710 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9f99cceab3284d89ba5d313b73c9eac6:info 2024-12-02T21:11:46,482 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:47,483 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:11:47,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45421 {}] regionserver.HRegion(8581): Flush requested on 9f99cceab3284d89ba5d313b73c9eac6 2024-12-02T21:11:47,678 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9f99cceab3284d89ba5d313b73c9eac6 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-02T21:11:47,685 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/.tmp/info/da7471d7f5b847c6bd7743cc3ae3d9f8 is 1080, key is row0059/info:/1733173905668/Put/seqid=0 2024-12-02T21:11:47,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741844_1020 (size=12509) 2024-12-02T21:11:47,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741844_1020 (size=12509) 2024-12-02T21:11:47,692 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/.tmp/info/da7471d7f5b847c6bd7743cc3ae3d9f8 2024-12-02T21:11:47,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/.tmp/info/da7471d7f5b847c6bd7743cc3ae3d9f8 as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/da7471d7f5b847c6bd7743cc3ae3d9f8 2024-12-02T21:11:47,703 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/da7471d7f5b847c6bd7743cc3ae3d9f8, entries=7, sequenceid=82, filesize=12.2 K 2024-12-02T21:11:47,703 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=17.86 KB/18292 for 9f99cceab3284d89ba5d313b73c9eac6 in 25ms, sequenceid=82, compaction requested=true 2024-12-02T21:11:47,703 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9f99cceab3284d89ba5d313b73c9eac6: 2024-12-02T21:11:47,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45421 {}] regionserver.HRegion(8581): Flush requested on 9f99cceab3284d89ba5d313b73c9eac6 2024-12-02T21:11:47,704 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=83.1 K, sizeToCheck=16.0 K 2024-12-02T21:11:47,704 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:11:47,704 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/852de29748204b5f88ce0fa139ec99f1 because midkey is the same as first or last row 2024-12-02T21:11:47,704 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 9f99cceab3284d89ba5d313b73c9eac6:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T21:11:47,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:11:47,704 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T21:11:47,704 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9f99cceab3284d89ba5d313b73c9eac6 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-12-02T21:11:47,705 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85094 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T21:11:47,705 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HStore(1540): 9f99cceab3284d89ba5d313b73c9eac6/info is initiating minor compaction (all files) 2024-12-02T21:11:47,705 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9f99cceab3284d89ba5d313b73c9eac6/info in TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6. 2024-12-02T21:11:47,705 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/852de29748204b5f88ce0fa139ec99f1, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/ab3aefbbabdd45a49a2fc8a7189b4959, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/da7471d7f5b847c6bd7743cc3ae3d9f8] into tmpdir=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/.tmp, totalSize=83.1 K 2024-12-02T21:11:47,706 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.Compactor(224): Compacting 852de29748204b5f88ce0fa139ec99f1, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733173893515 2024-12-02T21:11:47,706 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.Compactor(224): Compacting ab3aefbbabdd45a49a2fc8a7189b4959, keycount=21, bloomtype=ROW, size=27.0 K, encoding=NONE, compression=NONE, seqNum=71, earliestPutTs=1733173905639 2024-12-02T21:11:47,707 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.Compactor(224): Compacting da7471d7f5b847c6bd7743cc3ae3d9f8, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733173905668 2024-12-02T21:11:47,708 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/.tmp/info/d64c08c83314441a883c6723a70aae97 is 1080, key is row0066/info:/1733173907679/Put/seqid=0 2024-12-02T21:11:47,714 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741845_1021 (size=24376) 2024-12-02T21:11:47,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741845_1021 (size=24376) 2024-12-02T21:11:47,716 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=103 (bloomFilter=true), to=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/.tmp/info/d64c08c83314441a883c6723a70aae97 2024-12-02T21:11:47,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45421 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=9f99cceab3284d89ba5d313b73c9eac6, server=7d4f3b9a7081,45421,1733173881847 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T21:11:47,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45421 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:34180 deadline: 1733173917720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=9f99cceab3284d89ba5d313b73c9eac6, server=7d4f3b9a7081,45421,1733173881847 2024-12-02T21:11:47,722 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/.tmp/info/d64c08c83314441a883c6723a70aae97 as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/d64c08c83314441a883c6723a70aae97 2024-12-02T21:11:47,724 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9f99cceab3284d89ba5d313b73c9eac6#info#compaction#48 average throughput is 16.67 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T21:11:47,724 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/.tmp/info/969f3a1a960044a68450ea2d7111971e is 1080, key is row0001/info:/1733173893515/Put/seqid=0 2024-12-02T21:11:47,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741846_1022 (size=75378) 2024-12-02T21:11:47,728 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/d64c08c83314441a883c6723a70aae97, entries=18, sequenceid=103, filesize=23.8 K 2024-12-02T21:11:47,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741846_1022 (size=75378) 2024-12-02T21:11:47,729 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=11.56 KB/11836 for 9f99cceab3284d89ba5d313b73c9eac6 in 25ms, sequenceid=103, compaction requested=false 2024-12-02T21:11:47,729 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9f99cceab3284d89ba5d313b73c9eac6: 2024-12-02T21:11:47,729 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=106.9 K, sizeToCheck=16.0 K 2024-12-02T21:11:47,729 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:11:47,729 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/852de29748204b5f88ce0fa139ec99f1 because midkey is the same as first or last row 2024-12-02T21:11:47,733 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/.tmp/info/969f3a1a960044a68450ea2d7111971e as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/969f3a1a960044a68450ea2d7111971e 2024-12-02T21:11:47,740 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9f99cceab3284d89ba5d313b73c9eac6/info of 9f99cceab3284d89ba5d313b73c9eac6 into 969f3a1a960044a68450ea2d7111971e(size=73.6 K), total size for store is 97.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T21:11:47,740 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9f99cceab3284d89ba5d313b73c9eac6: 2024-12-02T21:11:47,740 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6., storeName=9f99cceab3284d89ba5d313b73c9eac6/info, priority=13, startTime=1733173907704; duration=0sec 2024-12-02T21:11:47,740 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=97.4 K, sizeToCheck=16.0 K 2024-12-02T21:11:47,740 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T21:11:47,741 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:11:47,741 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:11:47,741 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9f99cceab3284d89ba5d313b73c9eac6:info 2024-12-02T21:11:47,742 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44741 {}] assignment.AssignmentManager(1346): Split request from 7d4f3b9a7081,45421,1733173881847, parent={ENCODED => 9f99cceab3284d89ba5d313b73c9eac6, NAME => 'TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-12-02T21:11:47,745 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44741 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=7d4f3b9a7081,45421,1733173881847 2024-12-02T21:11:47,749 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44741 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=9f99cceab3284d89ba5d313b73c9eac6, daughterA=cedffaa969ea2cf9d508ee3a4b7624cb, daughterB=b778fbd7043cd29c14974d14143b03a8 2024-12-02T21:11:47,750 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=9f99cceab3284d89ba5d313b73c9eac6, daughterA=cedffaa969ea2cf9d508ee3a4b7624cb, daughterB=b778fbd7043cd29c14974d14143b03a8 2024-12-02T21:11:47,750 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=9f99cceab3284d89ba5d313b73c9eac6, daughterA=cedffaa969ea2cf9d508ee3a4b7624cb, daughterB=b778fbd7043cd29c14974d14143b03a8 2024-12-02T21:11:47,750 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=9f99cceab3284d89ba5d313b73c9eac6, daughterA=cedffaa969ea2cf9d508ee3a4b7624cb, daughterB=b778fbd7043cd29c14974d14143b03a8 2024-12-02T21:11:47,756 
INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9f99cceab3284d89ba5d313b73c9eac6, UNASSIGN}] 2024-12-02T21:11:47,757 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9f99cceab3284d89ba5d313b73c9eac6, UNASSIGN 2024-12-02T21:11:47,758 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=9f99cceab3284d89ba5d313b73c9eac6, regionState=CLOSING, regionLocation=7d4f3b9a7081,45421,1733173881847 2024-12-02T21:11:47,760 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-02T21:11:47,760 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE; CloseRegionProcedure 9f99cceab3284d89ba5d313b73c9eac6, server=7d4f3b9a7081,45421,1733173881847}] 2024-12-02T21:11:47,915 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7d4f3b9a7081,45421,1733173881847 2024-12-02T21:11:47,918 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] handler.UnassignRegionHandler(124): Close 9f99cceab3284d89ba5d313b73c9eac6 2024-12-02T21:11:47,918 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] handler.UnassignRegionHandler(138): Unassign region: split region: true: evictCache: true 2024-12-02T21:11:47,919 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1681): Closing 9f99cceab3284d89ba5d313b73c9eac6, disabling compactions & flushes 2024-12-02T21:11:47,919 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6. 2024-12-02T21:11:47,919 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6. 2024-12-02T21:11:47,919 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6. after waiting 0 ms 2024-12-02T21:11:47,919 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6. 
2024-12-02T21:11:47,919 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(2837): Flushing 9f99cceab3284d89ba5d313b73c9eac6 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-02T21:11:47,925 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/.tmp/info/9b8e63b38a93463dba9473c9ef4b5816 is 1080, key is row0084/info:/1733173907704/Put/seqid=0 2024-12-02T21:11:47,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741847_1023 (size=16817) 2024-12-02T21:11:47,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741847_1023 (size=16817) 2024-12-02T21:11:47,933 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/.tmp/info/9b8e63b38a93463dba9473c9ef4b5816 2024-12-02T21:11:47,941 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/.tmp/info/9b8e63b38a93463dba9473c9ef4b5816 as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/9b8e63b38a93463dba9473c9ef4b5816 2024-12-02T21:11:47,948 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/9b8e63b38a93463dba9473c9ef4b5816, entries=11, sequenceid=118, filesize=16.4 K 2024-12-02T21:11:47,949 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(3040): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=0 B/0 for 9f99cceab3284d89ba5d313b73c9eac6 in 30ms, sequenceid=118, compaction requested=true 2024-12-02T21:11:47,950 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/401f5a7054b54ed3b9f182af1a7f6c1d, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/adf5ef9ccb7341f7bd5a6338e57b5d33, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/852de29748204b5f88ce0fa139ec99f1, 
hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/ee110b841d4f4711856bd9894db27ad2, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/ab3aefbbabdd45a49a2fc8a7189b4959, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/da7471d7f5b847c6bd7743cc3ae3d9f8] to archive 2024-12-02T21:11:47,951 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-02T21:11:47,953 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/401f5a7054b54ed3b9f182af1a7f6c1d to hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/archive/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/401f5a7054b54ed3b9f182af1a7f6c1d 2024-12-02T21:11:47,954 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/adf5ef9ccb7341f7bd5a6338e57b5d33 to hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/archive/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/adf5ef9ccb7341f7bd5a6338e57b5d33 2024-12-02T21:11:47,956 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/852de29748204b5f88ce0fa139ec99f1 to hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/archive/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/852de29748204b5f88ce0fa139ec99f1 2024-12-02T21:11:47,957 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/ee110b841d4f4711856bd9894db27ad2 to hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/archive/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/ee110b841d4f4711856bd9894db27ad2 2024-12-02T21:11:47,958 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/ab3aefbbabdd45a49a2fc8a7189b4959 to 
hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/archive/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/ab3aefbbabdd45a49a2fc8a7189b4959 2024-12-02T21:11:47,959 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/da7471d7f5b847c6bd7743cc3ae3d9f8 to hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/archive/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/da7471d7f5b847c6bd7743cc3ae3d9f8 2024-12-02T21:11:47,963 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/recovered.edits/121.seqid, newMaxSeqId=121, maxSeqId=1 2024-12-02T21:11:47,964 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6. 2024-12-02T21:11:47,964 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1635): Region close journal for 9f99cceab3284d89ba5d313b73c9eac6: 2024-12-02T21:11:47,966 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] handler.UnassignRegionHandler(170): Closed 9f99cceab3284d89ba5d313b73c9eac6 2024-12-02T21:11:47,966 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=9f99cceab3284d89ba5d313b73c9eac6, regionState=CLOSED 2024-12-02T21:11:47,970 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=14, resume processing ppid=13 2024-12-02T21:11:47,970 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, ppid=13, state=SUCCESS; CloseRegionProcedure 9f99cceab3284d89ba5d313b73c9eac6, server=7d4f3b9a7081,45421,1733173881847 in 208 msec 2024-12-02T21:11:47,971 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-02T21:11:47,971 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9f99cceab3284d89ba5d313b73c9eac6, UNASSIGN in 214 msec 2024-12-02T21:11:47,991 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:11:47,992 INFO [PEWorker-5 {}] assignment.SplitTableRegionProcedure(728): pid=12 splitting 3 storefiles, region=9f99cceab3284d89ba5d313b73c9eac6, threads=3 2024-12-02T21:11:47,993 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=12 splitting started for store file: hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/969f3a1a960044a68450ea2d7111971e for region: 9f99cceab3284d89ba5d313b73c9eac6 2024-12-02T21:11:47,993 DEBUG [StoreFileSplitter-pool-1 {}] 
assignment.SplitTableRegionProcedure(823): pid=12 splitting started for store file: hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/9b8e63b38a93463dba9473c9ef4b5816 for region: 9f99cceab3284d89ba5d313b73c9eac6 2024-12-02T21:11:47,993 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=12 splitting started for store file: hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/d64c08c83314441a883c6723a70aae97 for region: 9f99cceab3284d89ba5d313b73c9eac6 2024-12-02T21:11:48,002 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/d64c08c83314441a883c6723a70aae97, top=true 2024-12-02T21:11:48,003 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/9b8e63b38a93463dba9473c9ef4b5816, top=true 2024-12-02T21:11:48,007 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/TestLogRolling-testLogRolling=9f99cceab3284d89ba5d313b73c9eac6-d64c08c83314441a883c6723a70aae97 for child: b778fbd7043cd29c14974d14143b03a8, parent: 9f99cceab3284d89ba5d313b73c9eac6 2024-12-02T21:11:48,007 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=12 splitting complete for store file: hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/d64c08c83314441a883c6723a70aae97 for region: 9f99cceab3284d89ba5d313b73c9eac6 2024-12-02T21:11:48,012 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/TestLogRolling-testLogRolling=9f99cceab3284d89ba5d313b73c9eac6-9b8e63b38a93463dba9473c9ef4b5816 for child: b778fbd7043cd29c14974d14143b03a8, parent: 9f99cceab3284d89ba5d313b73c9eac6 2024-12-02T21:11:48,012 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=12 splitting complete for store file: hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/9b8e63b38a93463dba9473c9ef4b5816 for region: 9f99cceab3284d89ba5d313b73c9eac6 2024-12-02T21:11:48,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741848_1024 (size=27) 2024-12-02T21:11:48,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741848_1024 (size=27) 2024-12-02T21:11:48,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 
is added to blk_1073741849_1025 (size=27) 2024-12-02T21:11:48,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741849_1025 (size=27) 2024-12-02T21:11:48,024 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=12 splitting complete for store file: hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/969f3a1a960044a68450ea2d7111971e for region: 9f99cceab3284d89ba5d313b73c9eac6 2024-12-02T21:11:48,025 DEBUG [PEWorker-5 {}] assignment.SplitTableRegionProcedure(802): pid=12 split storefiles for region 9f99cceab3284d89ba5d313b73c9eac6 Daughter A: [hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/cedffaa969ea2cf9d508ee3a4b7624cb/info/969f3a1a960044a68450ea2d7111971e.9f99cceab3284d89ba5d313b73c9eac6] storefiles, Daughter B: [hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/969f3a1a960044a68450ea2d7111971e.9f99cceab3284d89ba5d313b73c9eac6, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/TestLogRolling-testLogRolling=9f99cceab3284d89ba5d313b73c9eac6-9b8e63b38a93463dba9473c9ef4b5816, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/TestLogRolling-testLogRolling=9f99cceab3284d89ba5d313b73c9eac6-d64c08c83314441a883c6723a70aae97] storefiles. 
2024-12-02T21:11:48,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741850_1026 (size=71) 2024-12-02T21:11:48,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741850_1026 (size=71) 2024-12-02T21:11:48,035 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:11:48,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741851_1027 (size=71) 2024-12-02T21:11:48,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741851_1027 (size=71) 2024-12-02T21:11:48,049 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:11:48,057 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/cedffaa969ea2cf9d508ee3a4b7624cb/recovered.edits/121.seqid, newMaxSeqId=121, maxSeqId=-1 2024-12-02T21:11:48,059 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/recovered.edits/121.seqid, newMaxSeqId=121, maxSeqId=-1 2024-12-02T21:11:48,061 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733173908061"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1733173908061"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1733173908061"}]},"ts":"1733173908061"} 2024-12-02T21:11:48,061 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733173907745.cedffaa969ea2cf9d508ee3a4b7624cb.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733173908061"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733173908061"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733173908061"}]},"ts":"1733173908061"} 2024-12-02T21:11:48,061 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733173908061"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733173908061"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733173908061"}]},"ts":"1733173908061"} 2024-12-02T21:11:48,086 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45421 {}] regionserver.HRegion(8581): Flush requested on 1588230740 2024-12-02T21:11:48,086 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-12-02T21:11:48,086 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=4.75 KB heapSize=8.29 KB 2024-12-02T21:11:48,090 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=cedffaa969ea2cf9d508ee3a4b7624cb, ASSIGN}, {pid=16, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b778fbd7043cd29c14974d14143b03a8, ASSIGN}] 2024-12-02T21:11:48,091 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=16, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b778fbd7043cd29c14974d14143b03a8, ASSIGN 2024-12-02T21:11:48,092 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=15, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=cedffaa969ea2cf9d508ee3a4b7624cb, ASSIGN 2024-12-02T21:11:48,092 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=16, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b778fbd7043cd29c14974d14143b03a8, ASSIGN; state=SPLITTING_NEW, location=7d4f3b9a7081,45421,1733173881847; forceNewPlan=false, retain=false 2024-12-02T21:11:48,092 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=15, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=cedffaa969ea2cf9d508ee3a4b7624cb, ASSIGN; state=SPLITTING_NEW, location=7d4f3b9a7081,45421,1733173881847; forceNewPlan=false, retain=false 2024-12-02T21:11:48,101 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/meta/1588230740/.tmp/info/c3459bc1589d4023893956dbcd003b13 is 193, key is TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8./info:regioninfo/1733173908061/Put/seqid=0 2024-12-02T21:11:48,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741852_1028 (size=9423) 2024-12-02T21:11:48,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741852_1028 (size=9423) 2024-12-02T21:11:48,106 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.54 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/meta/1588230740/.tmp/info/c3459bc1589d4023893956dbcd003b13 2024-12-02T21:11:48,123 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/meta/1588230740/.tmp/table/c53eeb5ec2804f2ea8343b905a0d1260 is 65, key is TestLogRolling-testLogRolling/table:state/1733173883874/Put/seqid=0 2024-12-02T21:11:48,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:32945 is added to blk_1073741853_1029 (size=5412) 2024-12-02T21:11:48,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741853_1029 (size=5412) 2024-12-02T21:11:48,128 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=216 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/meta/1588230740/.tmp/table/c53eeb5ec2804f2ea8343b905a0d1260 2024-12-02T21:11:48,134 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/meta/1588230740/.tmp/info/c3459bc1589d4023893956dbcd003b13 as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/meta/1588230740/info/c3459bc1589d4023893956dbcd003b13 2024-12-02T21:11:48,140 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/meta/1588230740/info/c3459bc1589d4023893956dbcd003b13, entries=29, sequenceid=17, filesize=9.2 K 2024-12-02T21:11:48,141 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/meta/1588230740/.tmp/table/c53eeb5ec2804f2ea8343b905a0d1260 as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/meta/1588230740/table/c53eeb5ec2804f2ea8343b905a0d1260 2024-12-02T21:11:48,146 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/meta/1588230740/table/c53eeb5ec2804f2ea8343b905a0d1260, entries=4, sequenceid=17, filesize=5.3 K 2024-12-02T21:11:48,147 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~4.75 KB/4869, heapSize ~8.01 KB/8200, currentSize=0 B/0 for 1588230740 in 60ms, sequenceid=17, compaction requested=false 2024-12-02T21:11:48,147 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1588230740: 2024-12-02T21:11:48,243 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=15 updating hbase:meta row=cedffaa969ea2cf9d508ee3a4b7624cb, regionState=OPENING, regionLocation=7d4f3b9a7081,45421,1733173881847 2024-12-02T21:11:48,243 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=16 updating hbase:meta row=b778fbd7043cd29c14974d14143b03a8, regionState=OPENING, regionLocation=7d4f3b9a7081,45421,1733173881847 2024-12-02T21:11:48,245 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=15, state=RUNNABLE; OpenRegionProcedure cedffaa969ea2cf9d508ee3a4b7624cb, server=7d4f3b9a7081,45421,1733173881847}] 2024-12-02T21:11:48,246 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=18, ppid=16, state=RUNNABLE; OpenRegionProcedure b778fbd7043cd29c14974d14143b03a8, server=7d4f3b9a7081,45421,1733173881847}] 2024-12-02T21:11:48,398 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7d4f3b9a7081,45421,1733173881847 2024-12-02T21:11:48,401 INFO [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] handler.AssignRegionHandler(135): Open 
TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8. 2024-12-02T21:11:48,401 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(7285): Opening region: {ENCODED => b778fbd7043cd29c14974d14143b03a8, NAME => 'TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8.', STARTKEY => 'row0062', ENDKEY => ''} 2024-12-02T21:11:48,402 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling b778fbd7043cd29c14974d14143b03a8 2024-12-02T21:11:48,402 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:11:48,402 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(7327): checking encryption for b778fbd7043cd29c14974d14143b03a8 2024-12-02T21:11:48,402 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(7330): checking classloading for b778fbd7043cd29c14974d14143b03a8 2024-12-02T21:11:48,403 INFO [StoreOpener-b778fbd7043cd29c14974d14143b03a8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region b778fbd7043cd29c14974d14143b03a8 2024-12-02T21:11:48,404 INFO [StoreOpener-b778fbd7043cd29c14974d14143b03a8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b778fbd7043cd29c14974d14143b03a8 columnFamilyName info 2024-12-02T21:11:48,404 DEBUG [StoreOpener-b778fbd7043cd29c14974d14143b03a8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:11:48,418 DEBUG [StoreOpener-b778fbd7043cd29c14974d14143b03a8-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/969f3a1a960044a68450ea2d7111971e.9f99cceab3284d89ba5d313b73c9eac6->hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/969f3a1a960044a68450ea2d7111971e-top 2024-12-02T21:11:48,423 DEBUG [StoreOpener-b778fbd7043cd29c14974d14143b03a8-1 {}] regionserver.StoreEngine(277): loaded 
hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/TestLogRolling-testLogRolling=9f99cceab3284d89ba5d313b73c9eac6-9b8e63b38a93463dba9473c9ef4b5816 2024-12-02T21:11:48,427 DEBUG [StoreOpener-b778fbd7043cd29c14974d14143b03a8-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/TestLogRolling-testLogRolling=9f99cceab3284d89ba5d313b73c9eac6-d64c08c83314441a883c6723a70aae97 2024-12-02T21:11:48,427 INFO [StoreOpener-b778fbd7043cd29c14974d14143b03a8-1 {}] regionserver.HStore(327): Store=b778fbd7043cd29c14974d14143b03a8/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:11:48,428 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8 2024-12-02T21:11:48,429 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8 2024-12-02T21:11:48,431 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(1085): writing seq id for b778fbd7043cd29c14974d14143b03a8 2024-12-02T21:11:48,432 INFO [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(1102): Opened b778fbd7043cd29c14974d14143b03a8; next sequenceid=122; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=820874, jitterRate=0.0437958687543869}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T21:11:48,433 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(1001): Region open journal for b778fbd7043cd29c14974d14143b03a8: 2024-12-02T21:11:48,434 INFO [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8., pid=18, masterSystemTime=1733173908397 2024-12-02T21:11:48,434 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.CompactSplit(403): Add compact mark for store b778fbd7043cd29c14974d14143b03a8:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T21:11:48,434 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T21:11:48,434 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:11:48,436 INFO 
[RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HStore(1526): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8. 2024-12-02T21:11:48,436 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HStore(1540): b778fbd7043cd29c14974d14143b03a8/info is initiating minor compaction (all files) 2024-12-02T21:11:48,436 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b778fbd7043cd29c14974d14143b03a8/info in TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8. 2024-12-02T21:11:48,436 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8. 2024-12-02T21:11:48,436 INFO [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8. 2024-12-02T21:11:48,436 INFO [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRolling,,1733173907745.cedffaa969ea2cf9d508ee3a4b7624cb. 2024-12-02T21:11:48,436 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/969f3a1a960044a68450ea2d7111971e.9f99cceab3284d89ba5d313b73c9eac6->hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/969f3a1a960044a68450ea2d7111971e-top, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/TestLogRolling-testLogRolling=9f99cceab3284d89ba5d313b73c9eac6-d64c08c83314441a883c6723a70aae97, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/TestLogRolling-testLogRolling=9f99cceab3284d89ba5d313b73c9eac6-9b8e63b38a93463dba9473c9ef4b5816] into tmpdir=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp, totalSize=113.8 K 2024-12-02T21:11:48,436 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7285): Opening region: {ENCODED => cedffaa969ea2cf9d508ee3a4b7624cb, NAME => 'TestLogRolling-testLogRolling,,1733173907745.cedffaa969ea2cf9d508ee3a4b7624cb.', STARTKEY => '', ENDKEY => 'row0062'} 2024-12-02T21:11:48,437 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=16 updating hbase:meta row=b778fbd7043cd29c14974d14143b03a8, regionState=OPEN, openSeqNum=122, regionLocation=7d4f3b9a7081,45421,1733173881847 2024-12-02T21:11:48,437 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 
cedffaa969ea2cf9d508ee3a4b7624cb 2024-12-02T21:11:48,437 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRolling,,1733173907745.cedffaa969ea2cf9d508ee3a4b7624cb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:11:48,437 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7327): checking encryption for cedffaa969ea2cf9d508ee3a4b7624cb 2024-12-02T21:11:48,437 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.Compactor(224): Compacting 969f3a1a960044a68450ea2d7111971e.9f99cceab3284d89ba5d313b73c9eac6, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1733173893515 2024-12-02T21:11:48,437 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7330): checking classloading for cedffaa969ea2cf9d508ee3a4b7624cb 2024-12-02T21:11:48,437 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.Compactor(224): Compacting TestLogRolling-testLogRolling=9f99cceab3284d89ba5d313b73c9eac6-d64c08c83314441a883c6723a70aae97, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=103, earliestPutTs=1733173907679 2024-12-02T21:11:48,438 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.Compactor(224): Compacting TestLogRolling-testLogRolling=9f99cceab3284d89ba5d313b73c9eac6-9b8e63b38a93463dba9473c9ef4b5816, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733173907704 2024-12-02T21:11:48,438 INFO [StoreOpener-cedffaa969ea2cf9d508ee3a4b7624cb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region cedffaa969ea2cf9d508ee3a4b7624cb 2024-12-02T21:11:48,439 INFO [StoreOpener-cedffaa969ea2cf9d508ee3a4b7624cb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cedffaa969ea2cf9d508ee3a4b7624cb columnFamilyName info 2024-12-02T21:11:48,439 DEBUG [StoreOpener-cedffaa969ea2cf9d508ee3a4b7624cb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:11:48,440 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=18, resume processing ppid=16 2024-12-02T21:11:48,440 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, ppid=16, state=SUCCESS; OpenRegionProcedure b778fbd7043cd29c14974d14143b03a8, server=7d4f3b9a7081,45421,1733173881847 in 192 msec 2024-12-02T21:11:48,442 INFO 
[PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b778fbd7043cd29c14974d14143b03a8, ASSIGN in 351 msec 2024-12-02T21:11:48,448 DEBUG [StoreOpener-cedffaa969ea2cf9d508ee3a4b7624cb-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/cedffaa969ea2cf9d508ee3a4b7624cb/info/969f3a1a960044a68450ea2d7111971e.9f99cceab3284d89ba5d313b73c9eac6->hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/969f3a1a960044a68450ea2d7111971e-bottom 2024-12-02T21:11:48,449 INFO [StoreOpener-cedffaa969ea2cf9d508ee3a4b7624cb-1 {}] regionserver.HStore(327): Store=cedffaa969ea2cf9d508ee3a4b7624cb/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:11:48,449 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/cedffaa969ea2cf9d508ee3a4b7624cb 2024-12-02T21:11:48,451 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/cedffaa969ea2cf9d508ee3a4b7624cb 2024-12-02T21:11:48,453 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1085): writing seq id for cedffaa969ea2cf9d508ee3a4b7624cb 2024-12-02T21:11:48,453 INFO [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1102): Opened cedffaa969ea2cf9d508ee3a4b7624cb; next sequenceid=122; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=795257, jitterRate=0.011222273111343384}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T21:11:48,454 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1001): Region open journal for cedffaa969ea2cf9d508ee3a4b7624cb: 2024-12-02T21:11:48,454 INFO [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRolling,,1733173907745.cedffaa969ea2cf9d508ee3a4b7624cb., pid=17, masterSystemTime=1733173908397 2024-12-02T21:11:48,454 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.CompactSplit(403): Add compact mark for store cedffaa969ea2cf9d508ee3a4b7624cb:info, priority=-2147483648, current under compaction store size is 2 2024-12-02T21:11:48,454 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:11:48,454 DEBUG [RS:0;7d4f3b9a7081:45421-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction 
from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-02T21:11:48,455 INFO [RS:0;7d4f3b9a7081:45421-longCompactions-0 {}] regionserver.HStore(1526): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1733173907745.cedffaa969ea2cf9d508ee3a4b7624cb. 2024-12-02T21:11:48,455 DEBUG [RS:0;7d4f3b9a7081:45421-longCompactions-0 {}] regionserver.HStore(1540): cedffaa969ea2cf9d508ee3a4b7624cb/info is initiating minor compaction (all files) 2024-12-02T21:11:48,455 INFO [RS:0;7d4f3b9a7081:45421-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cedffaa969ea2cf9d508ee3a4b7624cb/info in TestLogRolling-testLogRolling,,1733173907745.cedffaa969ea2cf9d508ee3a4b7624cb. 2024-12-02T21:11:48,455 INFO [RS:0;7d4f3b9a7081:45421-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/cedffaa969ea2cf9d508ee3a4b7624cb/info/969f3a1a960044a68450ea2d7111971e.9f99cceab3284d89ba5d313b73c9eac6->hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/969f3a1a960044a68450ea2d7111971e-bottom] into tmpdir=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/cedffaa969ea2cf9d508ee3a4b7624cb/.tmp, totalSize=73.6 K 2024-12-02T21:11:48,456 DEBUG [RS:0;7d4f3b9a7081:45421-longCompactions-0 {}] compactions.Compactor(224): Compacting 969f3a1a960044a68450ea2d7111971e.9f99cceab3284d89ba5d313b73c9eac6, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733173893515 2024-12-02T21:11:48,456 DEBUG [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRolling,,1733173907745.cedffaa969ea2cf9d508ee3a4b7624cb. 2024-12-02T21:11:48,456 INFO [RS_OPEN_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRolling,,1733173907745.cedffaa969ea2cf9d508ee3a4b7624cb. 
2024-12-02T21:11:48,456 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=15 updating hbase:meta row=cedffaa969ea2cf9d508ee3a4b7624cb, regionState=OPEN, openSeqNum=122, regionLocation=7d4f3b9a7081,45421,1733173881847 2024-12-02T21:11:48,459 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=15 2024-12-02T21:11:48,459 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=15, state=SUCCESS; OpenRegionProcedure cedffaa969ea2cf9d508ee3a4b7624cb, server=7d4f3b9a7081,45421,1733173881847 in 213 msec 2024-12-02T21:11:48,461 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=12 2024-12-02T21:11:48,461 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=cedffaa969ea2cf9d508ee3a4b7624cb, ASSIGN in 369 msec 2024-12-02T21:11:48,461 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b778fbd7043cd29c14974d14143b03a8#info#compaction#52 average throughput is 33.86 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T21:11:48,462 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/4ec11df23e7d45acaf2ccf627d5ba52e is 1080, key is row0062/info:/1733173905672/Put/seqid=0 2024-12-02T21:11:48,462 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=9f99cceab3284d89ba5d313b73c9eac6, daughterA=cedffaa969ea2cf9d508ee3a4b7624cb, daughterB=b778fbd7043cd29c14974d14143b03a8 in 716 msec 2024-12-02T21:11:48,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741854_1030 (size=40830) 2024-12-02T21:11:48,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741854_1030 (size=40830) 2024-12-02T21:11:48,477 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/4ec11df23e7d45acaf2ccf627d5ba52e as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/4ec11df23e7d45acaf2ccf627d5ba52e 2024-12-02T21:11:48,480 INFO [RS:0;7d4f3b9a7081:45421-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cedffaa969ea2cf9d508ee3a4b7624cb#info#compaction#53 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T21:11:48,481 DEBUG [RS:0;7d4f3b9a7081:45421-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/cedffaa969ea2cf9d508ee3a4b7624cb/.tmp/info/253eae1774784af4b14f7ad35d75c47d is 1080, key is row0001/info:/1733173893515/Put/seqid=0 2024-12-02T21:11:48,483 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b778fbd7043cd29c14974d14143b03a8/info of b778fbd7043cd29c14974d14143b03a8 into 4ec11df23e7d45acaf2ccf627d5ba52e(size=39.9 K), total size for store is 39.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T21:11:48,483 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b778fbd7043cd29c14974d14143b03a8: 2024-12-02T21:11:48,483 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8., storeName=b778fbd7043cd29c14974d14143b03a8/info, priority=13, startTime=1733173908434; duration=0sec 2024-12-02T21:11:48,483 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:11:48,484 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b778fbd7043cd29c14974d14143b03a8:info 2024-12-02T21:11:48,484 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:48,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741855_1031 (size=70862) 2024-12-02T21:11:48,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741855_1031 (size=70862) 2024-12-02T21:11:48,492 DEBUG [RS:0;7d4f3b9a7081:45421-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/cedffaa969ea2cf9d508ee3a4b7624cb/.tmp/info/253eae1774784af4b14f7ad35d75c47d as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/cedffaa969ea2cf9d508ee3a4b7624cb/info/253eae1774784af4b14f7ad35d75c47d 2024-12-02T21:11:48,498 INFO [RS:0;7d4f3b9a7081:45421-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 1 (all) file(s) in cedffaa969ea2cf9d508ee3a4b7624cb/info of cedffaa969ea2cf9d508ee3a4b7624cb into 253eae1774784af4b14f7ad35d75c47d(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T21:11:48,498 DEBUG [RS:0;7d4f3b9a7081:45421-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cedffaa969ea2cf9d508ee3a4b7624cb: 2024-12-02T21:11:48,499 INFO [RS:0;7d4f3b9a7081:45421-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733173907745.cedffaa969ea2cf9d508ee3a4b7624cb., storeName=cedffaa969ea2cf9d508ee3a4b7624cb/info, priority=15, startTime=1733173908454; duration=0sec 2024-12-02T21:11:48,499 DEBUG [RS:0;7d4f3b9a7081:45421-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:11:48,499 DEBUG [RS:0;7d4f3b9a7081:45421-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cedffaa969ea2cf9d508ee3a4b7624cb:info 2024-12-02T21:11:49,485 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:50,486 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:11:51,486 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:51,685 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T21:11:52,487 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:52,965 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:52,965 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:52,965 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:52,966 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:52,966 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:52,966 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:52,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:52,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:52,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:52,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:52,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:52,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:52,989 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:52,989 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:52,990 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:52,991 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:53,488 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:53,499 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-02T21:11:53,501 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:53,501 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:53,502 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:53,503 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:53,503 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:53,504 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:53,523 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:53,523 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:53,523 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:53,524 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:53,524 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:53,524 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:53,527 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:53,527 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:53,528 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:53,530 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T21:11:54,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:55,490 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:56,214 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T21:11:56,215 INFO [RS-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34690, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T21:11:56,491 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:57,492 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:57,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45421 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:34180 deadline: 1733173927755, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733173883499.9f99cceab3284d89ba5d313b73c9eac6. is not online on 7d4f3b9a7081,45421,1733173881847 2024-12-02T21:11:58,493 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:11:59,494 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:00,495 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:01,496 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:02,496 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:03,445 INFO [master/7d4f3b9a7081:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-02T21:12:03,445 INFO [master/7d4f3b9a7081:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-02T21:12:03,497 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] ... Caused by: java.io.IOException: Filesystem closed ... 11 more
[Identical "Failed invocation" WARN entries from util.RecoverLeaseFSUtils(258) for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta, each carrying the same InvocationTargetException / "Filesystem closed" stack trace shown above, recur at roughly one-second intervals from 2024-12-02T21:12:04,497 through 2024-12-02T21:12:18,507.]
2024-12-02T21:12:07,758 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1588230740, had cached 0 bytes from a total of 14835
2024-12-02T21:12:19,508 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] ... Caused by: java.io.IOException: Filesystem closed ...
11 more 2024-12-02T21:12:19,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45421 {}] regionserver.HRegion(8581): Flush requested on b778fbd7043cd29c14974d14143b03a8 2024-12-02T21:12:19,889 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b778fbd7043cd29c14974d14143b03a8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-02T21:12:19,897 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/7d7080dd34e34636b7910bf843af287b is 1080, key is row0095/info:/1733173937875/Put/seqid=0 2024-12-02T21:12:19,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741856_1032 (size=12513) 2024-12-02T21:12:19,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741856_1032 (size=12513) 2024-12-02T21:12:19,904 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/7d7080dd34e34636b7910bf843af287b 2024-12-02T21:12:19,911 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/7d7080dd34e34636b7910bf843af287b as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/7d7080dd34e34636b7910bf843af287b 2024-12-02T21:12:19,916 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/7d7080dd34e34636b7910bf843af287b, entries=7, sequenceid=132, filesize=12.2 K 2024-12-02T21:12:19,917 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=18.91 KB/19368 for b778fbd7043cd29c14974d14143b03a8 in 28ms, sequenceid=132, compaction requested=false 2024-12-02T21:12:19,917 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b778fbd7043cd29c14974d14143b03a8: 2024-12-02T21:12:19,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45421 {}] regionserver.HRegion(8581): Flush requested on b778fbd7043cd29c14974d14143b03a8 2024-12-02T21:12:19,918 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b778fbd7043cd29c14974d14143b03a8 1/1 column families, dataSize=19.96 KB heapSize=21.63 KB 2024-12-02T21:12:19,922 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/b15409268e3b443aaf9532f08b22fa3b is 1080, key is row0102/info:/1733173939890/Put/seqid=0 2024-12-02T21:12:19,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to 
blk_1073741857_1033 (size=25472) 2024-12-02T21:12:19,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741857_1033 (size=25472) 2024-12-02T21:12:19,927 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=19.96 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/b15409268e3b443aaf9532f08b22fa3b 2024-12-02T21:12:19,932 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/b15409268e3b443aaf9532f08b22fa3b as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/b15409268e3b443aaf9532f08b22fa3b 2024-12-02T21:12:19,936 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/b15409268e3b443aaf9532f08b22fa3b, entries=19, sequenceid=154, filesize=24.9 K 2024-12-02T21:12:19,937 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~19.96 KB/20444, heapSize ~21.61 KB/22128, currentSize=8.41 KB/8608 for b778fbd7043cd29c14974d14143b03a8 in 19ms, sequenceid=154, compaction requested=true 2024-12-02T21:12:19,937 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b778fbd7043cd29c14974d14143b03a8: 2024-12-02T21:12:19,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b778fbd7043cd29c14974d14143b03a8:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T21:12:19,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:12:19,937 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T21:12:19,938 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 78815 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T21:12:19,938 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HStore(1540): b778fbd7043cd29c14974d14143b03a8/info is initiating minor compaction (all files) 2024-12-02T21:12:19,938 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b778fbd7043cd29c14974d14143b03a8/info in TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8. 
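The compaction entries above show the flush output being handed to the store's compaction policy: compactions.ExploringCompactionPolicy(116) reports that it examined the eligible store files and selected 3 files of size 78815 for a minor compaction. As a rough, hedged illustration of that size-ratio selection idea only (not the HBase implementation), the following self-contained Java sketch picks a contiguous run of files whose sizes stay within a ratio bound; the StoreFile record, the RATIO and MIN_FILES constants, and the selectFiles method are names invented for this sketch.

import java.util.ArrayList;
import java.util.List;

// Illustrative sketch only -- not HBase code.
public class ExploringSelectionSketch {

    // Hypothetical stand-in for a store file; only its size matters here.
    record StoreFile(String name, long sizeBytes) {}

    static final double RATIO = 1.2; // assumed ratio bound, analogous in spirit to hbase.hstore.compaction.ratio
    static final int MIN_FILES = 3;  // assumed minimum number of files worth compacting

    // Every file in the run must be no larger than RATIO times the sum of the others.
    static boolean withinRatio(List<StoreFile> run, long totalSize) {
        for (StoreFile f : run) {
            if (f.sizeBytes() > RATIO * (totalSize - f.sizeBytes())) {
                return false;
            }
        }
        return true;
    }

    // Explore every contiguous run of candidates and keep the best permissible one:
    // prefer more files, then a smaller total rewrite size.
    static List<StoreFile> selectFiles(List<StoreFile> candidates) {
        List<StoreFile> best = new ArrayList<>();
        long bestSize = Long.MAX_VALUE;
        for (int start = 0; start < candidates.size(); start++) {
            for (int end = start + MIN_FILES; end <= candidates.size(); end++) {
                List<StoreFile> run = candidates.subList(start, end);
                long total = run.stream().mapToLong(StoreFile::sizeBytes).sum();
                if (!withinRatio(run, total)) {
                    continue;
                }
                if (run.size() > best.size() || (run.size() == best.size() && total < bestSize)) {
                    best = new ArrayList<>(run);
                    bestSize = total;
                }
            }
        }
        return best;
    }

    public static void main(String[] args) {
        // Sizes taken from the log above: 39.9 K, 12.2 K and 24.9 K (78815 bytes in total).
        List<StoreFile> files = List.of(
            new StoreFile("4ec11df23e7d45acaf2ccf627d5ba52e", 40_830),
            new StoreFile("7d7080dd34e34636b7910bf843af287b", 12_513),
            new StoreFile("b15409268e3b443aaf9532f08b22fa3b", 25_472));
        System.out.println(selectFiles(files)); // selects all three files
    }
}

Fed the three sizes from the log (39.9 K, 12.2 K and 24.9 K), the sketch selects all three files, matching the minor compaction of totalSize=77.0 K recorded in the next entry.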
2024-12-02T21:12:19,938 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/4ec11df23e7d45acaf2ccf627d5ba52e, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/7d7080dd34e34636b7910bf843af287b, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/b15409268e3b443aaf9532f08b22fa3b] into tmpdir=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp, totalSize=77.0 K 2024-12-02T21:12:19,938 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ec11df23e7d45acaf2ccf627d5ba52e, keycount=33, bloomtype=ROW, size=39.9 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733173905672 2024-12-02T21:12:19,939 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7d7080dd34e34636b7910bf843af287b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733173937875 2024-12-02T21:12:19,939 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.Compactor(224): Compacting b15409268e3b443aaf9532f08b22fa3b, keycount=19, bloomtype=ROW, size=24.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1733173939890 2024-12-02T21:12:19,949 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b778fbd7043cd29c14974d14143b03a8#info#compaction#56 average throughput is 60.54 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T21:12:19,950 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/a93f5addea934584b1775db4cdc97610 is 1080, key is row0062/info:/1733173905672/Put/seqid=0 2024-12-02T21:12:19,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741858_1034 (size=69025) 2024-12-02T21:12:19,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741858_1034 (size=69025) 2024-12-02T21:12:20,369 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/a93f5addea934584b1775db4cdc97610 as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/a93f5addea934584b1775db4cdc97610 2024-12-02T21:12:20,376 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b778fbd7043cd29c14974d14143b03a8/info of b778fbd7043cd29c14974d14143b03a8 into a93f5addea934584b1775db4cdc97610(size=67.4 K), total size for store is 67.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T21:12:20,377 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b778fbd7043cd29c14974d14143b03a8: 2024-12-02T21:12:20,377 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8., storeName=b778fbd7043cd29c14974d14143b03a8/info, priority=13, startTime=1733173939937; duration=0sec 2024-12-02T21:12:20,377 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:12:20,377 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b778fbd7043cd29c14974d14143b03a8:info 2024-12-02T21:12:20,509 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:21,509 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:12:21,686 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T21:12:21,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45421 {}] regionserver.HRegion(8581): Flush requested on b778fbd7043cd29c14974d14143b03a8 2024-12-02T21:12:21,932 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b778fbd7043cd29c14974d14143b03a8 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-12-02T21:12:21,940 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/b422e6363096422db3e93f80ccc39ef7 is 1080, key is row0121/info:/1733173939918/Put/seqid=0 2024-12-02T21:12:21,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741859_1035 (size=14672) 2024-12-02T21:12:21,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741859_1035 (size=14672) 2024-12-02T21:12:21,946 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/b422e6363096422db3e93f80ccc39ef7 2024-12-02T21:12:21,953 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/b422e6363096422db3e93f80ccc39ef7 as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/b422e6363096422db3e93f80ccc39ef7 2024-12-02T21:12:21,959 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/b422e6363096422db3e93f80ccc39ef7, entries=9, sequenceid=167, filesize=14.3 K 2024-12-02T21:12:21,959 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=17.86 KB/18292 for b778fbd7043cd29c14974d14143b03a8 in 28ms, sequenceid=167, compaction requested=false 2024-12-02T21:12:21,959 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b778fbd7043cd29c14974d14143b03a8: 2024-12-02T21:12:21,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45421 {}] regionserver.HRegion(8581): Flush requested on b778fbd7043cd29c14974d14143b03a8 2024-12-02T21:12:21,960 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b778fbd7043cd29c14974d14143b03a8 1/1 column families, dataSize=19.96 KB heapSize=21.63 KB 2024-12-02T21:12:21,964 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/dbdec5c4848c4da0aa7b7364dc75e33e is 1080, key is row0130/info:/1733173941933/Put/seqid=0 2024-12-02T21:12:21,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741860_1036 (size=25472) 2024-12-02T21:12:21,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741860_1036 (size=25472) 2024-12-02T21:12:21,970 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=19.96 KB at sequenceid=189 (bloomFilter=true), to=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/dbdec5c4848c4da0aa7b7364dc75e33e 2024-12-02T21:12:21,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45421 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b778fbd7043cd29c14974d14143b03a8, server=7d4f3b9a7081,45421,1733173881847 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T21:12:21,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45421 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:34180 deadline: 1733173951971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b778fbd7043cd29c14974d14143b03a8, server=7d4f3b9a7081,45421,1733173881847 2024-12-02T21:12:21,975 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/dbdec5c4848c4da0aa7b7364dc75e33e as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/dbdec5c4848c4da0aa7b7364dc75e33e 2024-12-02T21:12:21,979 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/dbdec5c4848c4da0aa7b7364dc75e33e, entries=19, sequenceid=189, filesize=24.9 K 2024-12-02T21:12:21,980 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~19.96 KB/20444, heapSize ~21.61 KB/22128, currentSize=10.51 KB/10760 for b778fbd7043cd29c14974d14143b03a8 in 20ms, sequenceid=189, compaction requested=true 2024-12-02T21:12:21,980 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b778fbd7043cd29c14974d14143b03a8: 2024-12-02T21:12:21,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b778fbd7043cd29c14974d14143b03a8:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T21:12:21,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:12:21,981 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T21:12:21,982 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 109169 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T21:12:21,982 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HStore(1540): b778fbd7043cd29c14974d14143b03a8/info is initiating minor compaction (all files) 2024-12-02T21:12:21,982 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b778fbd7043cd29c14974d14143b03a8/info in TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8. 
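The RegionTooBusyException above is thrown from HRegion.checkResources(HRegion.java:5067) once the data buffered in the region's memstore passes the 32.0 K blocking limit reported in the message, pushing back on the client until a flush drains the region. The self-contained Java sketch below illustrates that back-pressure pattern under assumed names (MemstorePressureSketch, blockingMemstoreSize, checkResources, put, flushed); it is only an illustration of the idea, not the HBase implementation.

// Illustrative sketch only -- not HBase code.
public class MemstorePressureSketch {

    // Hypothetical local exception mirroring the role of org.apache.hadoop.hbase.RegionTooBusyException.
    static class RegionTooBusyException extends RuntimeException {
        RegionTooBusyException(String msg) { super(msg); }
    }

    private final String regionName;
    private final long blockingMemstoreSize; // bytes of buffered data at which new writes are rejected
    private long memstoreDataSize;           // bytes currently buffered and not yet flushed

    MemstorePressureSketch(String regionName, long blockingMemstoreSize) {
        this.regionName = regionName;
        this.blockingMemstoreSize = blockingMemstoreSize;
    }

    // Checked before each mutation, in the spirit of HRegion.checkResources:
    // reject the write once the buffered data exceeds the blocking limit.
    void checkResources() {
        if (memstoreDataSize > blockingMemstoreSize) {
            // A real regionserver also requests a flush before rejecting the caller.
            throw new RegionTooBusyException("Over memstore limit="
                + (blockingMemstoreSize / 1024) + " K, regionName=" + regionName);
        }
    }

    void put(long mutationSizeBytes) {
        checkResources();
        memstoreDataSize += mutationSizeBytes;
    }

    // A completed flush drains the buffered data, letting writes proceed again.
    void flushed(long flushedBytes) {
        memstoreDataSize -= flushedBytes;
    }

    public static void main(String[] args) {
        // 32 K blocking limit, matching the "Over memstore limit=32.0 K" entry above.
        MemstorePressureSketch region =
            new MemstorePressureSketch("b778fbd7043cd29c14974d14143b03a8", 32 * 1024);
        for (int i = 0; i < 31; i++) {
            region.put(1080); // ~1 KB cells, like the row0... puts in this test
        }
        region.put(1080); // throws RegionTooBusyException: Over memstore limit=32 K
    }
}

The 32 K limit is presumably set very low by this test so that the busy-region path is exercised; with stock HBase settings the blocking size is the memstore flush size multiplied by a blocking multiplier, which is far larger.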
2024-12-02T21:12:21,982 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/a93f5addea934584b1775db4cdc97610, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/b422e6363096422db3e93f80ccc39ef7, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/dbdec5c4848c4da0aa7b7364dc75e33e] into tmpdir=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp, totalSize=106.6 K 2024-12-02T21:12:21,983 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.Compactor(224): Compacting a93f5addea934584b1775db4cdc97610, keycount=59, bloomtype=ROW, size=67.4 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1733173905672 2024-12-02T21:12:21,983 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.Compactor(224): Compacting b422e6363096422db3e93f80ccc39ef7, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1733173939918 2024-12-02T21:12:21,983 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.Compactor(224): Compacting dbdec5c4848c4da0aa7b7364dc75e33e, keycount=19, bloomtype=ROW, size=24.9 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1733173941933 2024-12-02T21:12:21,992 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b778fbd7043cd29c14974d14143b03a8#info#compaction#59 average throughput is 89.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T21:12:21,993 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/b884cfe3a91f437aba59500e658ebbb4 is 1080, key is row0062/info:/1733173905672/Put/seqid=0 2024-12-02T21:12:21,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741861_1037 (size=99388) 2024-12-02T21:12:21,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741861_1037 (size=99388) 2024-12-02T21:12:22,021 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/b884cfe3a91f437aba59500e658ebbb4 as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/b884cfe3a91f437aba59500e658ebbb4 2024-12-02T21:12:22,025 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b778fbd7043cd29c14974d14143b03a8/info of b778fbd7043cd29c14974d14143b03a8 into b884cfe3a91f437aba59500e658ebbb4(size=97.1 K), total size for store is 97.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T21:12:22,025 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b778fbd7043cd29c14974d14143b03a8: 2024-12-02T21:12:22,025 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8., storeName=b778fbd7043cd29c14974d14143b03a8/info, priority=13, startTime=1733173941981; duration=0sec 2024-12-02T21:12:22,025 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:12:22,025 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b778fbd7043cd29c14974d14143b03a8:info 2024-12-02T21:12:22,510 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:22,841 DEBUG [master/7d4f3b9a7081:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 5dda45032950b51afffe60c272c5fabd changed from -1.0 to 0.0, refreshing cache 2024-12-02T21:12:23,511 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:24,512 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:25,513 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:26,514 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:12:27,515 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:28,516 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:29,517 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:30,518 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:31,518 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:32,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45421 {}] regionserver.HRegion(8581): Flush requested on b778fbd7043cd29c14974d14143b03a8 2024-12-02T21:12:32,042 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b778fbd7043cd29c14974d14143b03a8 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-02T21:12:32,048 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/25b2cb671281403ead48266c8e1da121 is 1080, key is row0149/info:/1733173941961/Put/seqid=0 2024-12-02T21:12:32,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741862_1038 (size=16828) 2024-12-02T21:12:32,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741862_1038 (size=16828) 2024-12-02T21:12:32,054 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/25b2cb671281403ead48266c8e1da121 2024-12-02T21:12:32,061 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/25b2cb671281403ead48266c8e1da121 as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/25b2cb671281403ead48266c8e1da121 2024-12-02T21:12:32,067 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/25b2cb671281403ead48266c8e1da121, entries=11, sequenceid=204, filesize=16.4 K 2024-12-02T21:12:32,068 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=1.05 KB/1076 for b778fbd7043cd29c14974d14143b03a8 in 26ms, sequenceid=204, compaction requested=false 2024-12-02T21:12:32,068 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b778fbd7043cd29c14974d14143b03a8: 2024-12-02T21:12:32,519 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:32,520 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=3 on file=hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta after 196176ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor199.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T21:12:33,402 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region b778fbd7043cd29c14974d14143b03a8, had cached 0 bytes from a total of 116216 2024-12-02T21:12:33,437 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region cedffaa969ea2cf9d508ee3a4b7624cb, had cached 0 bytes from a total of 70862 2024-12-02T21:12:33,521 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:12:34,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45421 {}] regionserver.HRegion(8581): Flush requested on b778fbd7043cd29c14974d14143b03a8 2024-12-02T21:12:34,053 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b778fbd7043cd29c14974d14143b03a8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-02T21:12:34,059 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/9762a453bcd54ca186d750e02dd1461a is 1080, key is row0160/info:/1733173952044/Put/seqid=0 2024-12-02T21:12:34,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741863_1039 (size=12516) 2024-12-02T21:12:34,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741863_1039 (size=12516) 2024-12-02T21:12:34,067 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/9762a453bcd54ca186d750e02dd1461a 2024-12-02T21:12:34,073 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/9762a453bcd54ca186d750e02dd1461a as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/9762a453bcd54ca186d750e02dd1461a 2024-12-02T21:12:34,080 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/9762a453bcd54ca186d750e02dd1461a, entries=7, sequenceid=214, filesize=12.2 K 2024-12-02T21:12:34,080 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=19.96 KB/20444 for b778fbd7043cd29c14974d14143b03a8 in 27ms, sequenceid=214, compaction requested=true 2024-12-02T21:12:34,081 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b778fbd7043cd29c14974d14143b03a8: 2024-12-02T21:12:34,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b778fbd7043cd29c14974d14143b03a8:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T21:12:34,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:12:34,081 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T21:12:34,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45421 {}] regionserver.HRegion(8581): Flush requested on b778fbd7043cd29c14974d14143b03a8 2024-12-02T21:12:34,081 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2837): Flushing b778fbd7043cd29c14974d14143b03a8 1/1 column families, dataSize=21.02 KB heapSize=22.75 KB 2024-12-02T21:12:34,082 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 128732 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T21:12:34,082 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HStore(1540): b778fbd7043cd29c14974d14143b03a8/info is initiating minor compaction (all files) 2024-12-02T21:12:34,082 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b778fbd7043cd29c14974d14143b03a8/info in TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8. 2024-12-02T21:12:34,082 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/b884cfe3a91f437aba59500e658ebbb4, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/25b2cb671281403ead48266c8e1da121, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/9762a453bcd54ca186d750e02dd1461a] into tmpdir=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp, totalSize=125.7 K 2024-12-02T21:12:34,083 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.Compactor(224): Compacting b884cfe3a91f437aba59500e658ebbb4, keycount=87, bloomtype=ROW, size=97.1 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1733173905672 2024-12-02T21:12:34,083 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.Compactor(224): Compacting 25b2cb671281403ead48266c8e1da121, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1733173941961 2024-12-02T21:12:34,084 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9762a453bcd54ca186d750e02dd1461a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1733173952044 2024-12-02T21:12:34,085 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/46d0979c67964d2ea9d621e72e94a8fb is 1080, key is row0167/info:/1733173954054/Put/seqid=0 2024-12-02T21:12:34,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741864_1040 (size=26550) 2024-12-02T21:12:34,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741864_1040 (size=26550) 2024-12-02T21:12:34,091 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=21.02 KB at sequenceid=237 (bloomFilter=true), 
to=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/46d0979c67964d2ea9d621e72e94a8fb 2024-12-02T21:12:34,096 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b778fbd7043cd29c14974d14143b03a8#info#compaction#63 average throughput is 53.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T21:12:34,097 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/8e331dd0b6984755951528c31b1f9c14 is 1080, key is row0062/info:/1733173905672/Put/seqid=0 2024-12-02T21:12:34,097 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/46d0979c67964d2ea9d621e72e94a8fb as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/46d0979c67964d2ea9d621e72e94a8fb 2024-12-02T21:12:34,101 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/46d0979c67964d2ea9d621e72e94a8fb, entries=20, sequenceid=237, filesize=25.9 K 2024-12-02T21:12:34,102 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~21.02 KB/21520, heapSize ~22.73 KB/23280, currentSize=6.30 KB/6456 for b778fbd7043cd29c14974d14143b03a8 in 21ms, sequenceid=237, compaction requested=false 2024-12-02T21:12:34,102 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b778fbd7043cd29c14974d14143b03a8: 2024-12-02T21:12:34,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741865_1041 (size=118898) 2024-12-02T21:12:34,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741865_1041 (size=118898) 2024-12-02T21:12:34,110 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/8e331dd0b6984755951528c31b1f9c14 as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/8e331dd0b6984755951528c31b1f9c14 2024-12-02T21:12:34,116 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b778fbd7043cd29c14974d14143b03a8/info of b778fbd7043cd29c14974d14143b03a8 into 8e331dd0b6984755951528c31b1f9c14(size=116.1 K), total size for store is 142.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T21:12:34,117 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b778fbd7043cd29c14974d14143b03a8: 2024-12-02T21:12:34,117 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8., storeName=b778fbd7043cd29c14974d14143b03a8/info, priority=13, startTime=1733173954081; duration=0sec 2024-12-02T21:12:34,117 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:12:34,117 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b778fbd7043cd29c14974d14143b03a8:info 2024-12-02T21:12:34,522 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:35,522 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:36,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45421 {}] regionserver.HRegion(8581): Flush requested on b778fbd7043cd29c14974d14143b03a8 2024-12-02T21:12:36,093 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b778fbd7043cd29c14974d14143b03a8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-02T21:12:36,100 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/9fa81858c602493e85c80f3bb8a45a30 is 1080, key is row0187/info:/1733173954082/Put/seqid=0 2024-12-02T21:12:36,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741866_1042 (size=12516) 2024-12-02T21:12:36,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741866_1042 (size=12516) 2024-12-02T21:12:36,107 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/9fa81858c602493e85c80f3bb8a45a30 2024-12-02T21:12:36,114 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/9fa81858c602493e85c80f3bb8a45a30 as 
hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/9fa81858c602493e85c80f3bb8a45a30 2024-12-02T21:12:36,120 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/9fa81858c602493e85c80f3bb8a45a30, entries=7, sequenceid=248, filesize=12.2 K 2024-12-02T21:12:36,121 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=17.86 KB/18292 for b778fbd7043cd29c14974d14143b03a8 in 28ms, sequenceid=248, compaction requested=true 2024-12-02T21:12:36,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b778fbd7043cd29c14974d14143b03a8: 2024-12-02T21:12:36,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b778fbd7043cd29c14974d14143b03a8:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T21:12:36,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:12:36,121 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T21:12:36,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45421 {}] regionserver.HRegion(8581): Flush requested on b778fbd7043cd29c14974d14143b03a8 2024-12-02T21:12:36,121 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b778fbd7043cd29c14974d14143b03a8 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-12-02T21:12:36,123 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 157964 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T21:12:36,123 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HStore(1540): b778fbd7043cd29c14974d14143b03a8/info is initiating minor compaction (all files) 2024-12-02T21:12:36,123 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b778fbd7043cd29c14974d14143b03a8/info in TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8. 
2024-12-02T21:12:36,123 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/8e331dd0b6984755951528c31b1f9c14, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/46d0979c67964d2ea9d621e72e94a8fb, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/9fa81858c602493e85c80f3bb8a45a30] into tmpdir=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp, totalSize=154.3 K 2024-12-02T21:12:36,124 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8e331dd0b6984755951528c31b1f9c14, keycount=105, bloomtype=ROW, size=116.1 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1733173905672 2024-12-02T21:12:36,124 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.Compactor(224): Compacting 46d0979c67964d2ea9d621e72e94a8fb, keycount=20, bloomtype=ROW, size=25.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733173954054 2024-12-02T21:12:36,125 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9fa81858c602493e85c80f3bb8a45a30, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733173954082 2024-12-02T21:12:36,126 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/d110ad36115b41e8b5639901632fd80b is 1080, key is row0194/info:/1733173956094/Put/seqid=0 2024-12-02T21:12:36,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741867_1043 (size=24406) 2024-12-02T21:12:36,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741867_1043 (size=24406) 2024-12-02T21:12:36,131 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=269 (bloomFilter=true), to=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/d110ad36115b41e8b5639901632fd80b 2024-12-02T21:12:36,137 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45421 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b778fbd7043cd29c14974d14143b03a8, server=7d4f3b9a7081,45421,1733173881847 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-02T21:12:36,137 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b778fbd7043cd29c14974d14143b03a8#info#compaction#66 average throughput is 67.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T21:12:36,137 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/d110ad36115b41e8b5639901632fd80b as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/d110ad36115b41e8b5639901632fd80b 2024-12-02T21:12:36,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45421 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:34180 deadline: 1733173966137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b778fbd7043cd29c14974d14143b03a8, server=7d4f3b9a7081,45421,1733173881847 2024-12-02T21:12:36,138 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/f56e5d95ace24a73b306bee3cfb32e68 is 1080, key is row0062/info:/1733173905672/Put/seqid=0 2024-12-02T21:12:36,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741868_1044 (size=148311) 2024-12-02T21:12:36,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741868_1044 (size=148311) 2024-12-02T21:12:36,143 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/d110ad36115b41e8b5639901632fd80b, entries=18, sequenceid=269, filesize=23.8 K 2024-12-02T21:12:36,144 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=11.56 KB/11836 for b778fbd7043cd29c14974d14143b03a8 in 23ms, sequenceid=269, compaction requested=false 2024-12-02T21:12:36,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b778fbd7043cd29c14974d14143b03a8: 2024-12-02T21:12:36,148 DEBUG 
[RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/f56e5d95ace24a73b306bee3cfb32e68 as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/f56e5d95ace24a73b306bee3cfb32e68 2024-12-02T21:12:36,154 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b778fbd7043cd29c14974d14143b03a8/info of b778fbd7043cd29c14974d14143b03a8 into f56e5d95ace24a73b306bee3cfb32e68(size=144.8 K), total size for store is 168.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T21:12:36,154 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b778fbd7043cd29c14974d14143b03a8: 2024-12-02T21:12:36,154 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8., storeName=b778fbd7043cd29c14974d14143b03a8/info, priority=13, startTime=1733173956121; duration=0sec 2024-12-02T21:12:36,154 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:12:36,154 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b778fbd7043cd29c14974d14143b03a8:info 2024-12-02T21:12:36,523 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:37,524 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:38,525 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:39,527 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:12:40,527 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:41,529 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:42,529 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:43,530 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:44,531 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:45,533 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:12:46,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45421 {}] regionserver.HRegion(8581): Flush requested on b778fbd7043cd29c14974d14143b03a8 2024-12-02T21:12:46,191 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b778fbd7043cd29c14974d14143b03a8 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-02T21:12:46,197 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/714231c026b6483681e334c873042721 is 1080, key is row0212/info:/1733173956122/Put/seqid=0 2024-12-02T21:12:46,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741869_1045 (size=17918) 2024-12-02T21:12:46,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741869_1045 (size=17918) 2024-12-02T21:12:46,202 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/714231c026b6483681e334c873042721 2024-12-02T21:12:46,208 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/714231c026b6483681e334c873042721 as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/714231c026b6483681e334c873042721 2024-12-02T21:12:46,213 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/714231c026b6483681e334c873042721, entries=12, sequenceid=285, filesize=17.5 K 2024-12-02T21:12:46,214 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=1.05 KB/1076 for b778fbd7043cd29c14974d14143b03a8 in 23ms, sequenceid=285, compaction requested=true 2024-12-02T21:12:46,215 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b778fbd7043cd29c14974d14143b03a8: 2024-12-02T21:12:46,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b778fbd7043cd29c14974d14143b03a8:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T21:12:46,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:12:46,215 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T21:12:46,216 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 190635 starting at candidate #0 after 
considering 1 permutations with 1 in ratio 2024-12-02T21:12:46,216 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HStore(1540): b778fbd7043cd29c14974d14143b03a8/info is initiating minor compaction (all files) 2024-12-02T21:12:46,216 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b778fbd7043cd29c14974d14143b03a8/info in TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8. 2024-12-02T21:12:46,216 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/f56e5d95ace24a73b306bee3cfb32e68, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/d110ad36115b41e8b5639901632fd80b, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/714231c026b6483681e334c873042721] into tmpdir=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp, totalSize=186.2 K 2024-12-02T21:12:46,217 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.Compactor(224): Compacting f56e5d95ace24a73b306bee3cfb32e68, keycount=132, bloomtype=ROW, size=144.8 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733173905672 2024-12-02T21:12:46,217 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.Compactor(224): Compacting d110ad36115b41e8b5639901632fd80b, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1733173956094 2024-12-02T21:12:46,217 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.Compactor(224): Compacting 714231c026b6483681e334c873042721, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1733173956122 2024-12-02T21:12:46,229 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b778fbd7043cd29c14974d14143b03a8#info#compaction#68 average throughput is 83.12 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T21:12:46,230 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/3e23c80ff10244c8ab636445ec0aae62 is 1080, key is row0062/info:/1733173905672/Put/seqid=0 2024-12-02T21:12:46,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741870_1046 (size=180785) 2024-12-02T21:12:46,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741870_1046 (size=180785) 2024-12-02T21:12:46,238 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/3e23c80ff10244c8ab636445ec0aae62 as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/3e23c80ff10244c8ab636445ec0aae62 2024-12-02T21:12:46,244 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b778fbd7043cd29c14974d14143b03a8/info of b778fbd7043cd29c14974d14143b03a8 into 3e23c80ff10244c8ab636445ec0aae62(size=176.5 K), total size for store is 176.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T21:12:46,244 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b778fbd7043cd29c14974d14143b03a8: 2024-12-02T21:12:46,244 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8., storeName=b778fbd7043cd29c14974d14143b03a8/info, priority=13, startTime=1733173966215; duration=0sec 2024-12-02T21:12:46,244 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:12:46,244 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b778fbd7043cd29c14974d14143b03a8:info 2024-12-02T21:12:46,534 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:47,535 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:12:48,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45421 {}] regionserver.HRegion(8581): Flush requested on b778fbd7043cd29c14974d14143b03a8 2024-12-02T21:12:48,205 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b778fbd7043cd29c14974d14143b03a8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-02T21:12:48,210 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/d0e46cb0a82848e2b8b9135ffacddadc is 1080, key is row0224/info:/1733173966192/Put/seqid=0 2024-12-02T21:12:48,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741871_1047 (size=12523) 2024-12-02T21:12:48,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741871_1047 (size=12523) 2024-12-02T21:12:48,220 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/d0e46cb0a82848e2b8b9135ffacddadc 2024-12-02T21:12:48,226 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/d0e46cb0a82848e2b8b9135ffacddadc as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/d0e46cb0a82848e2b8b9135ffacddadc 2024-12-02T21:12:48,231 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/d0e46cb0a82848e2b8b9135ffacddadc, entries=7, sequenceid=296, filesize=12.2 K 2024-12-02T21:12:48,232 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=22.07 KB/22596 for b778fbd7043cd29c14974d14143b03a8 in 27ms, sequenceid=296, compaction requested=false 2024-12-02T21:12:48,232 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b778fbd7043cd29c14974d14143b03a8: 2024-12-02T21:12:48,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45421 {}] regionserver.HRegion(8581): Flush requested on b778fbd7043cd29c14974d14143b03a8 2024-12-02T21:12:48,233 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b778fbd7043cd29c14974d14143b03a8 1/1 column families, dataSize=23.12 KB heapSize=25 KB 2024-12-02T21:12:48,237 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/c3114d3647ee41f2802d4e5d5465ecbd is 1080, key is row0231/info:/1733173968205/Put/seqid=0 2024-12-02T21:12:48,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to 
blk_1073741872_1048 (size=28728) 2024-12-02T21:12:48,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741872_1048 (size=28728) 2024-12-02T21:12:48,243 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=23.12 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/c3114d3647ee41f2802d4e5d5465ecbd 2024-12-02T21:12:48,248 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/c3114d3647ee41f2802d4e5d5465ecbd as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/c3114d3647ee41f2802d4e5d5465ecbd 2024-12-02T21:12:48,252 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/c3114d3647ee41f2802d4e5d5465ecbd, entries=22, sequenceid=321, filesize=28.1 K 2024-12-02T21:12:48,253 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~23.12 KB/23672, heapSize ~24.98 KB/25584, currentSize=4.20 KB/4304 for b778fbd7043cd29c14974d14143b03a8 in 20ms, sequenceid=321, compaction requested=true 2024-12-02T21:12:48,253 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b778fbd7043cd29c14974d14143b03a8: 2024-12-02T21:12:48,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b778fbd7043cd29c14974d14143b03a8:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T21:12:48,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:12:48,253 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T21:12:48,254 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 222036 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T21:12:48,254 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HStore(1540): b778fbd7043cd29c14974d14143b03a8/info is initiating minor compaction (all files) 2024-12-02T21:12:48,254 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b778fbd7043cd29c14974d14143b03a8/info in TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8. 
2024-12-02T21:12:48,254 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/3e23c80ff10244c8ab636445ec0aae62, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/d0e46cb0a82848e2b8b9135ffacddadc, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/c3114d3647ee41f2802d4e5d5465ecbd] into tmpdir=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp, totalSize=216.8 K 2024-12-02T21:12:48,255 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e23c80ff10244c8ab636445ec0aae62, keycount=162, bloomtype=ROW, size=176.5 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1733173905672 2024-12-02T21:12:48,255 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.Compactor(224): Compacting d0e46cb0a82848e2b8b9135ffacddadc, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1733173966192 2024-12-02T21:12:48,255 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] compactions.Compactor(224): Compacting c3114d3647ee41f2802d4e5d5465ecbd, keycount=22, bloomtype=ROW, size=28.1 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1733173968205 2024-12-02T21:12:48,267 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b778fbd7043cd29c14974d14143b03a8#info#compaction#71 average throughput is 65.33 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T21:12:48,267 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/83a187baadd24228b1b1234ad963e003 is 1080, key is row0062/info:/1733173905672/Put/seqid=0 2024-12-02T21:12:48,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741873_1049 (size=212255) 2024-12-02T21:12:48,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741873_1049 (size=212255) 2024-12-02T21:12:48,276 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/83a187baadd24228b1b1234ad963e003 as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/83a187baadd24228b1b1234ad963e003 2024-12-02T21:12:48,282 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b778fbd7043cd29c14974d14143b03a8/info of b778fbd7043cd29c14974d14143b03a8 into 83a187baadd24228b1b1234ad963e003(size=207.3 K), total size for store is 207.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T21:12:48,282 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b778fbd7043cd29c14974d14143b03a8: 2024-12-02T21:12:48,282 INFO [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8., storeName=b778fbd7043cd29c14974d14143b03a8/info, priority=13, startTime=1733173968253; duration=0sec 2024-12-02T21:12:48,283 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T21:12:48,283 DEBUG [RS:0;7d4f3b9a7081:45421-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b778fbd7043cd29c14974d14143b03a8:info 2024-12-02T21:12:48,536 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:49,537 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:12:50,237 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-12-02T21:12:50,238 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C45421%2C1733173881847.1733173970237 2024-12-02T21:12:50,244 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/WALs/7d4f3b9a7081,45421,1733173881847/7d4f3b9a7081%2C45421%2C1733173881847.1733173882379 with entries=311, filesize=307.59 KB; new WAL /user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/WALs/7d4f3b9a7081,45421,1733173881847/7d4f3b9a7081%2C45421%2C1733173881847.1733173970237 2024-12-02T21:12:50,245 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33209:33209),(127.0.0.1/127.0.0.1:42237:42237)] 2024-12-02T21:12:50,245 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/WALs/7d4f3b9a7081,45421,1733173881847/7d4f3b9a7081%2C45421%2C1733173881847.1733173882379 is not closed yet, will try archiving it next time 2024-12-02T21:12:50,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741833_1009 (size=314980) 2024-12-02T21:12:50,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741833_1009 (size=314980) 2024-12-02T21:12:50,247 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 5dda45032950b51afffe60c272c5fabd 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-02T21:12:50,262 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/namespace/5dda45032950b51afffe60c272c5fabd/.tmp/info/3a9bb4bb5ac1493da05f73d64c8791a0 is 45, key is default/info:d/1733173883325/Put/seqid=0 2024-12-02T21:12:50,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741875_1051 (size=5037) 2024-12-02T21:12:50,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741875_1051 (size=5037) 2024-12-02T21:12:50,266 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/namespace/5dda45032950b51afffe60c272c5fabd/.tmp/info/3a9bb4bb5ac1493da05f73d64c8791a0 2024-12-02T21:12:50,271 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/namespace/5dda45032950b51afffe60c272c5fabd/.tmp/info/3a9bb4bb5ac1493da05f73d64c8791a0 as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/namespace/5dda45032950b51afffe60c272c5fabd/info/3a9bb4bb5ac1493da05f73d64c8791a0 2024-12-02T21:12:50,275 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/namespace/5dda45032950b51afffe60c272c5fabd/info/3a9bb4bb5ac1493da05f73d64c8791a0, entries=2, sequenceid=6, filesize=4.9 K 
2024-12-02T21:12:50,276 INFO [Time-limited test {}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 5dda45032950b51afffe60c272c5fabd in 29ms, sequenceid=6, compaction requested=false 2024-12-02T21:12:50,276 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 5dda45032950b51afffe60c272c5fabd: 2024-12-02T21:12:50,276 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for cedffaa969ea2cf9d508ee3a4b7624cb: 2024-12-02T21:12:50,277 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.21 KB heapSize=4.13 KB 2024-12-02T21:12:50,280 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/meta/1588230740/.tmp/info/ef276ea4d253485ba87e70d84b2b65d1 is 193, key is TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8./info:regioninfo/1733173908437/Put/seqid=0 2024-12-02T21:12:50,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741876_1052 (size=7803) 2024-12-02T21:12:50,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741876_1052 (size=7803) 2024-12-02T21:12:50,286 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.21 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/meta/1588230740/.tmp/info/ef276ea4d253485ba87e70d84b2b65d1 2024-12-02T21:12:50,292 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/meta/1588230740/.tmp/info/ef276ea4d253485ba87e70d84b2b65d1 as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/meta/1588230740/info/ef276ea4d253485ba87e70d84b2b65d1 2024-12-02T21:12:50,298 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/meta/1588230740/info/ef276ea4d253485ba87e70d84b2b65d1, entries=16, sequenceid=24, filesize=7.6 K 2024-12-02T21:12:50,299 INFO [Time-limited test {}] regionserver.HRegion(3040): Finished flush of dataSize ~2.21 KB/2260, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 23ms, sequenceid=24, compaction requested=false 2024-12-02T21:12:50,299 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 1588230740: 2024-12-02T21:12:50,299 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing b778fbd7043cd29c14974d14143b03a8 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-02T21:12:50,304 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/6efb52caedc941a59cc0fec929723212 is 1080, key is row0253/info:/1733173968234/Put/seqid=0 2024-12-02T21:12:50,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741877_1053 (size=9278) 2024-12-02T21:12:50,309 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741877_1053 (size=9278) 2024-12-02T21:12:50,309 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/6efb52caedc941a59cc0fec929723212 2024-12-02T21:12:50,314 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/.tmp/info/6efb52caedc941a59cc0fec929723212 as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/6efb52caedc941a59cc0fec929723212 2024-12-02T21:12:50,318 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/6efb52caedc941a59cc0fec929723212, entries=4, sequenceid=329, filesize=9.1 K 2024-12-02T21:12:50,319 INFO [Time-limited test {}] regionserver.HRegion(3040): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for b778fbd7043cd29c14974d14143b03a8 in 20ms, sequenceid=329, compaction requested=false 2024-12-02T21:12:50,319 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for b778fbd7043cd29c14974d14143b03a8: 2024-12-02T21:12:50,319 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C45421%2C1733173881847.1733173970319 2024-12-02T21:12:50,325 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/WALs/7d4f3b9a7081,45421,1733173881847/7d4f3b9a7081%2C45421%2C1733173881847.1733173970237 with entries=4, filesize=1.22 KB; new WAL /user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/WALs/7d4f3b9a7081,45421,1733173881847/7d4f3b9a7081%2C45421%2C1733173881847.1733173970319 2024-12-02T21:12:50,325 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42237:42237),(127.0.0.1/127.0.0.1:33209:33209)] 2024-12-02T21:12:50,325 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/WALs/7d4f3b9a7081,45421,1733173881847/7d4f3b9a7081%2C45421%2C1733173881847.1733173970237 is not closed yet, will try archiving it next time 2024-12-02T21:12:50,325 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/WALs/7d4f3b9a7081,45421,1733173881847/7d4f3b9a7081%2C45421%2C1733173881847.1733173882379 to hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/oldWALs/7d4f3b9a7081%2C45421%2C1733173881847.1733173882379 2024-12-02T21:12:50,326 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T21:12:50,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741874_1050 (size=1255) 2024-12-02T21:12:50,326 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741874_1050 (size=1255) 2024-12-02T21:12:50,327 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/WALs/7d4f3b9a7081,45421,1733173881847/7d4f3b9a7081%2C45421%2C1733173881847.1733173970237 to hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/oldWALs/7d4f3b9a7081%2C45421%2C1733173881847.1733173970237 2024-12-02T21:12:50,426 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-02T21:12:50,426 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-02T21:12:50,426 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x07e604e4 to 127.0.0.1:59514 2024-12-02T21:12:50,426 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:12:50,426 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-02T21:12:50,426 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=331385028, stopped=false 2024-12-02T21:12:50,426 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=7d4f3b9a7081,44741,1733173881701 2024-12-02T21:12:50,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45421-0x101992c4dcc0001, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T21:12:50,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T21:12:50,470 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-02T21:12:50,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:12:50,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45421-0x101992c4dcc0001, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:12:50,471 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:12:50,471 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7d4f3b9a7081,45421,1733173881847' ***** 2024-12-02T21:12:50,471 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-02T21:12:50,471 INFO [RS:0;7d4f3b9a7081:45421 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T21:12:50,471 INFO [RS:0;7d4f3b9a7081:45421 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T21:12:50,471 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-02T21:12:50,472 INFO [RS:0;7d4f3b9a7081:45421 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-02T21:12:50,472 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45421-0x101992c4dcc0001, quorum=127.0.0.1:59514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:12:50,472 INFO [RS:0;7d4f3b9a7081:45421 {}] regionserver.HRegionServer(3579): Received CLOSE for 5dda45032950b51afffe60c272c5fabd 2024-12-02T21:12:50,472 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:12:50,472 INFO [RS:0;7d4f3b9a7081:45421 {}] regionserver.HRegionServer(3579): Received CLOSE for cedffaa969ea2cf9d508ee3a4b7624cb 2024-12-02T21:12:50,472 INFO [RS:0;7d4f3b9a7081:45421 {}] regionserver.HRegionServer(3579): Received CLOSE for b778fbd7043cd29c14974d14143b03a8 2024-12-02T21:12:50,472 INFO [RS:0;7d4f3b9a7081:45421 {}] regionserver.HRegionServer(1224): stopping server 7d4f3b9a7081,45421,1733173881847 2024-12-02T21:12:50,472 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 5dda45032950b51afffe60c272c5fabd, disabling compactions & flushes 2024-12-02T21:12:50,472 DEBUG [RS:0;7d4f3b9a7081:45421 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:12:50,472 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733173882841.5dda45032950b51afffe60c272c5fabd. 2024-12-02T21:12:50,472 INFO [RS:0;7d4f3b9a7081:45421 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T21:12:50,472 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733173882841.5dda45032950b51afffe60c272c5fabd. 2024-12-02T21:12:50,473 INFO [RS:0;7d4f3b9a7081:45421 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T21:12:50,473 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733173882841.5dda45032950b51afffe60c272c5fabd. after waiting 0 ms 2024-12-02T21:12:50,473 INFO [RS:0;7d4f3b9a7081:45421 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-02T21:12:50,473 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733173882841.5dda45032950b51afffe60c272c5fabd. 
2024-12-02T21:12:50,473 INFO [RS:0;7d4f3b9a7081:45421 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-02T21:12:50,473 INFO [RS:0;7d4f3b9a7081:45421 {}] regionserver.HRegionServer(1599): Waiting on 4 regions to close 2024-12-02T21:12:50,473 DEBUG [RS:0;7d4f3b9a7081:45421 {}] regionserver.HRegionServer(1603): Online Regions={5dda45032950b51afffe60c272c5fabd=hbase:namespace,,1733173882841.5dda45032950b51afffe60c272c5fabd., cedffaa969ea2cf9d508ee3a4b7624cb=TestLogRolling-testLogRolling,,1733173907745.cedffaa969ea2cf9d508ee3a4b7624cb., 1588230740=hbase:meta,,1.1588230740, b778fbd7043cd29c14974d14143b03a8=TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8.} 2024-12-02T21:12:50,473 DEBUG [RS:0;7d4f3b9a7081:45421 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 5dda45032950b51afffe60c272c5fabd, b778fbd7043cd29c14974d14143b03a8, cedffaa969ea2cf9d508ee3a4b7624cb 2024-12-02T21:12:50,473 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-02T21:12:50,474 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-02T21:12:50,474 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-02T21:12:50,474 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T21:12:50,474 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T21:12:50,478 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/namespace/5dda45032950b51afffe60c272c5fabd/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T21:12:50,479 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/hbase/meta/1588230740/recovered.edits/27.seqid, newMaxSeqId=27, maxSeqId=1 2024-12-02T21:12:50,479 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733173882841.5dda45032950b51afffe60c272c5fabd. 2024-12-02T21:12:50,479 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 5dda45032950b51afffe60c272c5fabd: 2024-12-02T21:12:50,479 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733173882841.5dda45032950b51afffe60c272c5fabd. 
2024-12-02T21:12:50,479 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing cedffaa969ea2cf9d508ee3a4b7624cb, disabling compactions & flushes 2024-12-02T21:12:50,479 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,,1733173907745.cedffaa969ea2cf9d508ee3a4b7624cb. 2024-12-02T21:12:50,479 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,,1733173907745.cedffaa969ea2cf9d508ee3a4b7624cb. 2024-12-02T21:12:50,479 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,,1733173907745.cedffaa969ea2cf9d508ee3a4b7624cb. after waiting 0 ms 2024-12-02T21:12:50,479 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,,1733173907745.cedffaa969ea2cf9d508ee3a4b7624cb. 2024-12-02T21:12:50,479 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T21:12:50,479 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-02T21:12:50,479 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-02T21:12:50,480 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-02T21:12:50,480 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733173907745.cedffaa969ea2cf9d508ee3a4b7624cb.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/cedffaa969ea2cf9d508ee3a4b7624cb/info/969f3a1a960044a68450ea2d7111971e.9f99cceab3284d89ba5d313b73c9eac6->hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/969f3a1a960044a68450ea2d7111971e-bottom] to archive 2024-12-02T21:12:50,480 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733173907745.cedffaa969ea2cf9d508ee3a4b7624cb.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-02T21:12:50,482 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733173907745.cedffaa969ea2cf9d508ee3a4b7624cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/cedffaa969ea2cf9d508ee3a4b7624cb/info/969f3a1a960044a68450ea2d7111971e.9f99cceab3284d89ba5d313b73c9eac6 to hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/archive/data/default/TestLogRolling-testLogRolling/cedffaa969ea2cf9d508ee3a4b7624cb/info/969f3a1a960044a68450ea2d7111971e.9f99cceab3284d89ba5d313b73c9eac6 2024-12-02T21:12:50,486 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/cedffaa969ea2cf9d508ee3a4b7624cb/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=121 2024-12-02T21:12:50,486 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,,1733173907745.cedffaa969ea2cf9d508ee3a4b7624cb. 2024-12-02T21:12:50,486 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for cedffaa969ea2cf9d508ee3a4b7624cb: 2024-12-02T21:12:50,486 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1733173907745.cedffaa969ea2cf9d508ee3a4b7624cb. 2024-12-02T21:12:50,486 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing b778fbd7043cd29c14974d14143b03a8, disabling compactions & flushes 2024-12-02T21:12:50,486 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8. 2024-12-02T21:12:50,486 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8. 2024-12-02T21:12:50,486 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8. after waiting 0 ms 2024-12-02T21:12:50,486 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8. 
2024-12-02T21:12:50,487 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/969f3a1a960044a68450ea2d7111971e.9f99cceab3284d89ba5d313b73c9eac6->hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/9f99cceab3284d89ba5d313b73c9eac6/info/969f3a1a960044a68450ea2d7111971e-top, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/TestLogRolling-testLogRolling=9f99cceab3284d89ba5d313b73c9eac6-d64c08c83314441a883c6723a70aae97, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/4ec11df23e7d45acaf2ccf627d5ba52e, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/TestLogRolling-testLogRolling=9f99cceab3284d89ba5d313b73c9eac6-9b8e63b38a93463dba9473c9ef4b5816, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/7d7080dd34e34636b7910bf843af287b, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/a93f5addea934584b1775db4cdc97610, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/b15409268e3b443aaf9532f08b22fa3b, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/b422e6363096422db3e93f80ccc39ef7, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/b884cfe3a91f437aba59500e658ebbb4, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/dbdec5c4848c4da0aa7b7364dc75e33e, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/25b2cb671281403ead48266c8e1da121, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/8e331dd0b6984755951528c31b1f9c14, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/9762a453bcd54ca186d750e02dd1461a, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/46d0979c67964d2ea9d621e72e94a8fb, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/f56e5d95ace24a73b306bee3cfb32e68, 
hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/9fa81858c602493e85c80f3bb8a45a30, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/d110ad36115b41e8b5639901632fd80b, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/3e23c80ff10244c8ab636445ec0aae62, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/714231c026b6483681e334c873042721, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/d0e46cb0a82848e2b8b9135ffacddadc, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/c3114d3647ee41f2802d4e5d5465ecbd] to archive 2024-12-02T21:12:50,488 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-02T21:12:50,489 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/969f3a1a960044a68450ea2d7111971e.9f99cceab3284d89ba5d313b73c9eac6 to hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/archive/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/969f3a1a960044a68450ea2d7111971e.9f99cceab3284d89ba5d313b73c9eac6 2024-12-02T21:12:50,491 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/TestLogRolling-testLogRolling=9f99cceab3284d89ba5d313b73c9eac6-d64c08c83314441a883c6723a70aae97 to hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/archive/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/TestLogRolling-testLogRolling=9f99cceab3284d89ba5d313b73c9eac6-d64c08c83314441a883c6723a70aae97 2024-12-02T21:12:50,492 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/4ec11df23e7d45acaf2ccf627d5ba52e to hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/archive/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/4ec11df23e7d45acaf2ccf627d5ba52e 2024-12-02T21:12:50,494 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8.-1 {}] backup.HFileArchiver(596): Archived 
from FileableStoreFile, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/TestLogRolling-testLogRolling=9f99cceab3284d89ba5d313b73c9eac6-9b8e63b38a93463dba9473c9ef4b5816 to hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/archive/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/TestLogRolling-testLogRolling=9f99cceab3284d89ba5d313b73c9eac6-9b8e63b38a93463dba9473c9ef4b5816 2024-12-02T21:12:50,495 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/7d7080dd34e34636b7910bf843af287b to hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/archive/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/7d7080dd34e34636b7910bf843af287b 2024-12-02T21:12:50,497 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/a93f5addea934584b1775db4cdc97610 to hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/archive/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/a93f5addea934584b1775db4cdc97610 2024-12-02T21:12:50,498 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/b15409268e3b443aaf9532f08b22fa3b to hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/archive/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/b15409268e3b443aaf9532f08b22fa3b 2024-12-02T21:12:50,500 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/b422e6363096422db3e93f80ccc39ef7 to hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/archive/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/b422e6363096422db3e93f80ccc39ef7 2024-12-02T21:12:50,501 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/b884cfe3a91f437aba59500e658ebbb4 to hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/archive/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/b884cfe3a91f437aba59500e658ebbb4 
2024-12-02T21:12:50,502 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/dbdec5c4848c4da0aa7b7364dc75e33e to hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/archive/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/dbdec5c4848c4da0aa7b7364dc75e33e 2024-12-02T21:12:50,504 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/25b2cb671281403ead48266c8e1da121 to hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/archive/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/25b2cb671281403ead48266c8e1da121 2024-12-02T21:12:50,505 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/8e331dd0b6984755951528c31b1f9c14 to hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/archive/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/8e331dd0b6984755951528c31b1f9c14 2024-12-02T21:12:50,506 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/9762a453bcd54ca186d750e02dd1461a to hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/archive/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/9762a453bcd54ca186d750e02dd1461a 2024-12-02T21:12:50,508 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/46d0979c67964d2ea9d621e72e94a8fb to hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/archive/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/46d0979c67964d2ea9d621e72e94a8fb 2024-12-02T21:12:50,509 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/f56e5d95ace24a73b306bee3cfb32e68 to 
hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/archive/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/f56e5d95ace24a73b306bee3cfb32e68 2024-12-02T21:12:50,511 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/9fa81858c602493e85c80f3bb8a45a30 to hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/archive/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/9fa81858c602493e85c80f3bb8a45a30 2024-12-02T21:12:50,512 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/d110ad36115b41e8b5639901632fd80b to hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/archive/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/d110ad36115b41e8b5639901632fd80b 2024-12-02T21:12:50,514 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/3e23c80ff10244c8ab636445ec0aae62 to hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/archive/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/3e23c80ff10244c8ab636445ec0aae62 2024-12-02T21:12:50,515 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/714231c026b6483681e334c873042721 to hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/archive/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/714231c026b6483681e334c873042721 2024-12-02T21:12:50,517 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/d0e46cb0a82848e2b8b9135ffacddadc to hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/archive/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/d0e46cb0a82848e2b8b9135ffacddadc 2024-12-02T21:12:50,518 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/c3114d3647ee41f2802d4e5d5465ecbd to hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/archive/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/info/c3114d3647ee41f2802d4e5d5465ecbd
2024-12-02T21:12:50,523 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/data/default/TestLogRolling-testLogRolling/b778fbd7043cd29c14974d14143b03a8/recovered.edits/332.seqid, newMaxSeqId=332, maxSeqId=121
2024-12-02T21:12:50,524 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8.
2024-12-02T21:12:50,524 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for b778fbd7043cd29c14974d14143b03a8:
2024-12-02T21:12:50,524 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1733173907745.b778fbd7043cd29c14974d14143b03a8.
2024-12-02T21:12:50,538 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-02T21:12:50,674 INFO [RS:0;7d4f3b9a7081:45421 {}] regionserver.HRegionServer(1250): stopping server 7d4f3b9a7081,45421,1733173881847; all regions closed.
2024-12-02T21:12:50,675 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/WALs/7d4f3b9a7081,45421,1733173881847
2024-12-02T21:12:50,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741834_1010 (size=9351)
2024-12-02T21:12:50,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741834_1010 (size=9351)
2024-12-02T21:12:50,684 DEBUG [RS:0;7d4f3b9a7081:45421 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/oldWALs
2024-12-02T21:12:50,684 INFO [RS:0;7d4f3b9a7081:45421 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 7d4f3b9a7081%2C45421%2C1733173881847.meta:.meta(num 1733173882748)
2024-12-02T21:12:50,685 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/WALs/7d4f3b9a7081,45421,1733173881847
2024-12-02T21:12:50,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741878_1054 (size=1071)
2024-12-02T21:12:50,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741878_1054 (size=1071)
2024-12-02T21:12:50,688 DEBUG [RS:0;7d4f3b9a7081:45421 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/oldWALs
2024-12-02T21:12:50,688 INFO [RS:0;7d4f3b9a7081:45421 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 7d4f3b9a7081%2C45421%2C1733173881847:(num 1733173970319)
2024-12-02T21:12:50,688 DEBUG [RS:0;7d4f3b9a7081:45421 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-02T21:12:50,688 INFO [RS:0;7d4f3b9a7081:45421 {}] regionserver.LeaseManager(133): Closed leases
2024-12-02T21:12:50,688 INFO [RS:0;7d4f3b9a7081:45421 {}] hbase.ChoreService(370): Chore service for: regionserver/7d4f3b9a7081:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-12-02T21:12:50,688 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-02T21:12:50,689 INFO [RS:0;7d4f3b9a7081:45421 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:45421 2024-12-02T21:12:50,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T21:12:50,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45421-0x101992c4dcc0001, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7d4f3b9a7081,45421,1733173881847 2024-12-02T21:12:50,703 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7d4f3b9a7081,45421,1733173881847] 2024-12-02T21:12:50,703 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 7d4f3b9a7081,45421,1733173881847; numProcessing=1 2024-12-02T21:12:50,712 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/7d4f3b9a7081,45421,1733173881847 already deleted, retry=false 2024-12-02T21:12:50,712 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 7d4f3b9a7081,45421,1733173881847 expired; onlineServers=0 2024-12-02T21:12:50,712 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7d4f3b9a7081,44741,1733173881701' ***** 2024-12-02T21:12:50,712 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-02T21:12:50,712 DEBUG [M:0;7d4f3b9a7081:44741 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a8e9132, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7d4f3b9a7081/172.17.0.2:0 2024-12-02T21:12:50,712 INFO [M:0;7d4f3b9a7081:44741 {}] regionserver.HRegionServer(1224): stopping server 7d4f3b9a7081,44741,1733173881701 2024-12-02T21:12:50,712 INFO [M:0;7d4f3b9a7081:44741 {}] regionserver.HRegionServer(1250): stopping server 7d4f3b9a7081,44741,1733173881701; all regions closed. 2024-12-02T21:12:50,712 DEBUG [M:0;7d4f3b9a7081:44741 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:12:50,712 DEBUG [M:0;7d4f3b9a7081:44741 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-02T21:12:50,712 DEBUG [M:0;7d4f3b9a7081:44741 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-02T21:12:50,712 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-02T21:12:50,712 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.large.0-1733173882128 {}] cleaner.HFileCleaner(306): Exit Thread[master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.large.0-1733173882128,5,FailOnTimeoutGroup] 2024-12-02T21:12:50,712 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.small.0-1733173882128 {}] cleaner.HFileCleaner(306): Exit Thread[master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.small.0-1733173882128,5,FailOnTimeoutGroup] 2024-12-02T21:12:50,713 INFO [M:0;7d4f3b9a7081:44741 {}] hbase.ChoreService(370): Chore service for: master/7d4f3b9a7081:0 had [] on shutdown 2024-12-02T21:12:50,713 DEBUG [M:0;7d4f3b9a7081:44741 {}] master.HMaster(1733): Stopping service threads 2024-12-02T21:12:50,713 INFO [M:0;7d4f3b9a7081:44741 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-02T21:12:50,713 INFO [M:0;7d4f3b9a7081:44741 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-02T21:12:50,713 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-02T21:12:50,720 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-02T21:12:50,720 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:12:50,720 DEBUG [M:0;7d4f3b9a7081:44741 {}] zookeeper.ZKUtil(347): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-02T21:12:50,720 WARN [M:0;7d4f3b9a7081:44741 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-02T21:12:50,720 INFO [M:0;7d4f3b9a7081:44741 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-02T21:12:50,721 INFO [M:0;7d4f3b9a7081:44741 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-02T21:12:50,721 DEBUG [M:0;7d4f3b9a7081:44741 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T21:12:50,721 INFO [M:0;7d4f3b9a7081:44741 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:12:50,721 DEBUG [M:0;7d4f3b9a7081:44741 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:12:50,721 DEBUG [M:0;7d4f3b9a7081:44741 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T21:12:50,721 DEBUG [M:0;7d4f3b9a7081:44741 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-02T21:12:50,721 INFO [M:0;7d4f3b9a7081:44741 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=66.42 KB heapSize=81.66 KB 2024-12-02T21:12:50,721 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T21:12:50,738 DEBUG [M:0;7d4f3b9a7081:44741 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6bd7e4c5c2d049a6aa31dc0864236efc is 82, key is hbase:meta,,1/info:regioninfo/1733173882770/Put/seqid=0 2024-12-02T21:12:50,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741879_1055 (size=5672) 2024-12-02T21:12:50,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741879_1055 (size=5672) 2024-12-02T21:12:50,743 INFO [M:0;7d4f3b9a7081:44741 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6bd7e4c5c2d049a6aa31dc0864236efc 2024-12-02T21:12:50,761 DEBUG [M:0;7d4f3b9a7081:44741 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bccfb382ee3343209494891e6ab66057 is 749, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1733173883889/Put/seqid=0 2024-12-02T21:12:50,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741880_1056 (size=7284) 2024-12-02T21:12:50,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741880_1056 (size=7284) 2024-12-02T21:12:50,766 INFO [M:0;7d4f3b9a7081:44741 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65.82 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bccfb382ee3343209494891e6ab66057 2024-12-02T21:12:50,770 INFO [M:0;7d4f3b9a7081:44741 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for bccfb382ee3343209494891e6ab66057 2024-12-02T21:12:50,785 DEBUG [M:0;7d4f3b9a7081:44741 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2aa1a91a5b05415094b6ef2d31ad52d3 is 69, key is 7d4f3b9a7081,45421,1733173881847/rs:state/1733173882219/Put/seqid=0 2024-12-02T21:12:50,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741881_1057 (size=5156) 2024-12-02T21:12:50,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741881_1057 (size=5156) 2024-12-02T21:12:50,789 INFO [M:0;7d4f3b9a7081:44741 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2aa1a91a5b05415094b6ef2d31ad52d3 2024-12-02T21:12:50,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45421-0x101992c4dcc0001, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:12:50,803 INFO [RS:0;7d4f3b9a7081:45421 {}] regionserver.HRegionServer(1307): Exiting; stopping=7d4f3b9a7081,45421,1733173881847; zookeeper connection closed. 2024-12-02T21:12:50,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45421-0x101992c4dcc0001, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:12:50,804 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3a4b31ec {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3a4b31ec 2024-12-02T21:12:50,804 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-02T21:12:50,806 DEBUG [M:0;7d4f3b9a7081:44741 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e4e6767caff24a03a34d7ce9f6d97a31 is 52, key is load_balancer_on/state:d/1733173883495/Put/seqid=0 2024-12-02T21:12:50,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741882_1058 (size=5056) 2024-12-02T21:12:50,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741882_1058 (size=5056) 2024-12-02T21:12:50,811 INFO [M:0;7d4f3b9a7081:44741 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e4e6767caff24a03a34d7ce9f6d97a31 2024-12-02T21:12:50,815 DEBUG [M:0;7d4f3b9a7081:44741 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6bd7e4c5c2d049a6aa31dc0864236efc as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6bd7e4c5c2d049a6aa31dc0864236efc 2024-12-02T21:12:50,819 INFO [M:0;7d4f3b9a7081:44741 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6bd7e4c5c2d049a6aa31dc0864236efc, entries=8, sequenceid=164, filesize=5.5 K 2024-12-02T21:12:50,820 DEBUG [M:0;7d4f3b9a7081:44741 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bccfb382ee3343209494891e6ab66057 as 
hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bccfb382ee3343209494891e6ab66057 2024-12-02T21:12:50,825 INFO [M:0;7d4f3b9a7081:44741 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for bccfb382ee3343209494891e6ab66057 2024-12-02T21:12:50,825 INFO [M:0;7d4f3b9a7081:44741 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bccfb382ee3343209494891e6ab66057, entries=18, sequenceid=164, filesize=7.1 K 2024-12-02T21:12:50,826 DEBUG [M:0;7d4f3b9a7081:44741 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2aa1a91a5b05415094b6ef2d31ad52d3 as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2aa1a91a5b05415094b6ef2d31ad52d3 2024-12-02T21:12:50,830 INFO [M:0;7d4f3b9a7081:44741 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2aa1a91a5b05415094b6ef2d31ad52d3, entries=1, sequenceid=164, filesize=5.0 K 2024-12-02T21:12:50,831 DEBUG [M:0;7d4f3b9a7081:44741 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e4e6767caff24a03a34d7ce9f6d97a31 as hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e4e6767caff24a03a34d7ce9f6d97a31 2024-12-02T21:12:50,835 INFO [M:0;7d4f3b9a7081:44741 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38991/user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e4e6767caff24a03a34d7ce9f6d97a31, entries=1, sequenceid=164, filesize=4.9 K 2024-12-02T21:12:50,836 INFO [M:0;7d4f3b9a7081:44741 {}] regionserver.HRegion(3040): Finished flush of dataSize ~66.42 KB/68019, heapSize ~81.60 KB/83560, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 115ms, sequenceid=164, compaction requested=false 2024-12-02T21:12:50,837 INFO [M:0;7d4f3b9a7081:44741 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
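
The flush recorded above follows the usual two-step commit: each column family's memstore is written to an HFile under the region's .tmp directory, and only afterwards moved into the live store directory (info, proc, rs, state). As a rough illustration of that write-then-rename pattern on HDFS — not the actual HRegionFileSystem code, and using a hypothetical NameNode address and paths — a minimal Java sketch:

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenRenameSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Hypothetical NameNode; the run above talks to hdfs://localhost:38991.
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:9000"), conf)) {
      Path tmp = new Path("/demo/region/.tmp/flush-0001");   // hypothetical paths
      Path dst = new Path("/demo/region/info/flush-0001");

      // Step 1: write the new file somewhere readers never look.
      try (FSDataOutputStream out = fs.create(tmp, true)) {
        out.writeBytes("flushed cells would go here");
      }

      // Step 2: "commit" by renaming into the live family directory. The rename is
      // a single NameNode metadata operation, so a scanner sees either no file or
      // the whole file, never a partially written one.
      fs.mkdirs(dst.getParent());
      if (!fs.rename(tmp, dst)) {
        throw new IOException("failed to commit " + tmp + " as " + dst);
      }
      System.out.println("committed " + dst);
    }
  }
}
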
2024-12-02T21:12:50,837 DEBUG [M:0;7d4f3b9a7081:44741 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-02T21:12:50,838 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/323dc30b-4318-278f-57db-7c089b12fa30/MasterData/WALs/7d4f3b9a7081,44741,1733173881701 2024-12-02T21:12:50,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741830_1006 (size=79248) 2024-12-02T21:12:50,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741830_1006 (size=79248) 2024-12-02T21:12:51,242 INFO [M:0;7d4f3b9a7081:44741 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-02T21:12:51,242 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-02T21:12:51,242 INFO [M:0;7d4f3b9a7081:44741 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:44741 2024-12-02T21:12:51,279 DEBUG [M:0;7d4f3b9a7081:44741 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/7d4f3b9a7081,44741,1733173881701 already deleted, retry=false 2024-12-02T21:12:51,387 INFO [M:0;7d4f3b9a7081:44741 {}] regionserver.HRegionServer(1307): Exiting; stopping=7d4f3b9a7081,44741,1733173881701; zookeeper connection closed. 2024-12-02T21:12:51,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:12:51,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44741-0x101992c4dcc0000, quorum=127.0.0.1:59514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T21:12:51,419 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7f3995f0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:12:51,419 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2eb7c5c4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:12:51,419 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:12:51,420 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e53ee54{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:12:51,420 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6bdeed05{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/hadoop.log.dir/,STOPPED} 2024-12-02T21:12:51,423 WARN [BP-1860221667-172.17.0.2-1733173880307 heartbeating to localhost/127.0.0.1:38991 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:12:51,423 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T21:12:51,423 WARN [BP-1860221667-172.17.0.2-1733173880307 heartbeating to localhost/127.0.0.1:38991 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1860221667-172.17.0.2-1733173880307 (Datanode Uuid ddbdc62c-1399-4729-ab2a-c797860c0e62) service to localhost/127.0.0.1:38991 2024-12-02T21:12:51,423 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:12:51,424 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/cluster_fa39a46f-f7b4-88f9-3eda-0cc056d788d5/dfs/data/data3/current/BP-1860221667-172.17.0.2-1733173880307 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:12:51,424 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/cluster_fa39a46f-f7b4-88f9-3eda-0cc056d788d5/dfs/data/data4/current/BP-1860221667-172.17.0.2-1733173880307 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:12:51,425 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:12:51,428 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@45272860{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:12:51,429 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@613ebde1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:12:51,429 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:12:51,429 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@76d04136{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:12:51,429 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22a22fa6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/hadoop.log.dir/,STOPPED} 2024-12-02T21:12:51,430 WARN [BP-1860221667-172.17.0.2-1733173880307 heartbeating to localhost/127.0.0.1:38991 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T21:12:51,431 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
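
The "sleep interrupted" and "Ending command processor service" warnings above are the normal shutdown path for the datanodes' background threads: each loop sleeps between refreshes and treats an interrupt as its stop signal. A generic sketch of that pattern in plain Java — not the Hadoop CachingGetSpaceUsed or BPServiceActor code:

public class RefreshThreadSketch {
  public static void main(String[] args) throws Exception {
    Thread refresher = new Thread(() -> {
      while (!Thread.currentThread().isInterrupted()) {
        try {
          // Pretend to recompute disk usage, then wait for the next round.
          System.out.println("refreshing disk usage...");
          Thread.sleep(10_000);
        } catch (InterruptedException e) {
          // The analogue of "Thread Interrupted waiting to refresh disk information":
          // log once, restore the interrupt flag, and fall out of the loop.
          System.out.println("interrupted while waiting to refresh, shutting down");
          Thread.currentThread().interrupt();
        }
      }
    }, "refreshUsed-sketch");
    refresher.start();

    Thread.sleep(500);          // let one refresh happen
    refresher.interrupt();      // a shutdown hook or cluster stop does this
    refresher.join();
  }
}
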
2024-12-02T21:12:51,431 WARN [BP-1860221667-172.17.0.2-1733173880307 heartbeating to localhost/127.0.0.1:38991 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1860221667-172.17.0.2-1733173880307 (Datanode Uuid b3e2690e-061b-4518-862a-5786927a1c7e) service to localhost/127.0.0.1:38991 2024-12-02T21:12:51,431 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T21:12:51,431 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/cluster_fa39a46f-f7b4-88f9-3eda-0cc056d788d5/dfs/data/data1/current/BP-1860221667-172.17.0.2-1733173880307 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:12:51,431 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/cluster_fa39a46f-f7b4-88f9-3eda-0cc056d788d5/dfs/data/data2/current/BP-1860221667-172.17.0.2-1733173880307 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T21:12:51,431 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T21:12:51,437 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6de7bcd8{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T21:12:51,437 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@69205fd0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T21:12:51,437 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T21:12:51,438 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@71b9bf54{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T21:12:51,438 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@450ce414{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/hadoop.log.dir/,STOPPED} 2024-12-02T21:12:51,443 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-02T21:12:51,471 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-02T21:12:51,476 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=125 (was 112) - Thread LEAK? -, OpenFileDescriptor=487 (was 464) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=68 (was 109), ProcessCount=11 (was 11), AvailableMemoryMB=6743 (was 7069) 2024-12-02T21:12:51,482 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=126, OpenFileDescriptor=487, MaxFileDescriptor=1048576, SystemLoadAverage=68, ProcessCount=11, AvailableMemoryMB=6743 2024-12-02T21:12:51,482 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-02T21:12:51,482 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/hadoop.log.dir so I do NOT create it in target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7 2024-12-02T21:12:51,482 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/6f9b010f-f1bb-e267-e41f-dde7c2a41da4/hadoop.tmp.dir so I do NOT create it in target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7 2024-12-02T21:12:51,482 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/cluster_d1106b91-8d14-61f9-5ce5-728fd048ffe2, deleteOnExit=true 2024-12-02T21:12:51,482 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-02T21:12:51,482 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/test.cache.data in system properties and HBase conf 2024-12-02T21:12:51,483 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/hadoop.tmp.dir in system properties and HBase conf 2024-12-02T21:12:51,483 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/hadoop.log.dir in system properties and HBase conf 2024-12-02T21:12:51,483 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-02T21:12:51,483 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-02T21:12:51,483 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-02T21:12:51,483 DEBUG [Time-limited test {}] 
fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-02T21:12:51,483 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-02T21:12:51,483 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-02T21:12:51,483 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-02T21:12:51,483 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T21:12:51,483 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-02T21:12:51,483 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-02T21:12:51,483 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T21:12:51,483 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T21:12:51,484 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-02T21:12:51,484 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/nfs.dump.dir in system properties and HBase conf 2024-12-02T21:12:51,484 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir 
to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/java.io.tmpdir in system properties and HBase conf 2024-12-02T21:12:51,484 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T21:12:51,484 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-02T21:12:51,484 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-02T21:12:51,496 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T21:12:51,539 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T21:12:51,753 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:12:51,756 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:12:51,757 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:12:51,757 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:12:51,757 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T21:12:51,758 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:12:51,758 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@33224ca9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:12:51,758 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59c5fb4b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:12:51,847 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1b720e42{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/java.io.tmpdir/jetty-localhost-42843-hadoop-hdfs-3_4_1-tests_jar-_-any-4672392253047973876/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T21:12:51,848 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@542dfbed{HTTP/1.1, (http/1.1)}{localhost:42843} 2024-12-02T21:12:51,848 INFO [Time-limited test {}] server.Server(415): Started @383228ms 2024-12-02T21:12:51,858 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T21:12:52,005 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:12:52,009 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:12:52,011 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:12:52,011 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:12:52,011 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T21:12:52,013 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68c8979e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:12:52,014 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39941173{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:12:52,103 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@53367781{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/java.io.tmpdir/jetty-localhost-45613-hadoop-hdfs-3_4_1-tests_jar-_-any-3546288336567702833/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:12:52,103 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@9e26b70{HTTP/1.1, (http/1.1)}{localhost:45613} 2024-12-02T21:12:52,103 INFO [Time-limited test {}] server.Server(415): Started @383483ms 2024-12-02T21:12:52,104 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:12:52,128 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T21:12:52,130 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T21:12:52,130 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T21:12:52,130 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T21:12:52,130 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T21:12:52,131 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b72c457{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/hadoop.log.dir/,AVAILABLE} 2024-12-02T21:12:52,131 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1aef51eb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T21:12:52,222 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@654055a1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/java.io.tmpdir/jetty-localhost-35855-hadoop-hdfs-3_4_1-tests_jar-_-any-11050499293098827091/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T21:12:52,222 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@30fa4dee{HTTP/1.1, (http/1.1)}{localhost:35855} 2024-12-02T21:12:52,222 INFO [Time-limited test {}] server.Server(415): Started @383602ms 2024-12-02T21:12:52,223 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T21:12:52,252 INFO [regionserver/7d4f3b9a7081:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T21:12:52,540 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:52,810 WARN [Thread-2234 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/cluster_d1106b91-8d14-61f9-5ce5-728fd048ffe2/dfs/data/data1/current/BP-1842846650-172.17.0.2-1733173971512/current, will proceed with Du for space computation calculation, 2024-12-02T21:12:52,810 WARN [Thread-2235 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/cluster_d1106b91-8d14-61f9-5ce5-728fd048ffe2/dfs/data/data2/current/BP-1842846650-172.17.0.2-1733173971512/current, will proceed with Du for space computation calculation, 2024-12-02T21:12:52,827 WARN [Thread-2198 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T21:12:52,829 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x83a1b5c97c48fcb8 with lease ID 0x7e566f4a40561f59: Processing first storage report for DS-d8faff65-6778-4995-8ab6-5c2a5ea8afcd from datanode DatanodeRegistration(127.0.0.1:44483, datanodeUuid=78737652-ad5c-4abe-aaab-05304046d616, infoPort=39977, infoSecurePort=0, ipcPort=40489, storageInfo=lv=-57;cid=testClusterID;nsid=17149769;c=1733173971512) 2024-12-02T21:12:52,829 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x83a1b5c97c48fcb8 with lease ID 0x7e566f4a40561f59: from storage DS-d8faff65-6778-4995-8ab6-5c2a5ea8afcd node DatanodeRegistration(127.0.0.1:44483, datanodeUuid=78737652-ad5c-4abe-aaab-05304046d616, infoPort=39977, infoSecurePort=0, ipcPort=40489, storageInfo=lv=-57;cid=testClusterID;nsid=17149769;c=1733173971512), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:12:52,829 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x83a1b5c97c48fcb8 with lease ID 0x7e566f4a40561f59: Processing first storage report for DS-9e533251-0299-47e3-9b24-f1aa048aefd2 from datanode DatanodeRegistration(127.0.0.1:44483, datanodeUuid=78737652-ad5c-4abe-aaab-05304046d616, infoPort=39977, infoSecurePort=0, ipcPort=40489, storageInfo=lv=-57;cid=testClusterID;nsid=17149769;c=1733173971512) 2024-12-02T21:12:52,829 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x83a1b5c97c48fcb8 with lease ID 0x7e566f4a40561f59: from storage DS-9e533251-0299-47e3-9b24-f1aa048aefd2 node DatanodeRegistration(127.0.0.1:44483, datanodeUuid=78737652-ad5c-4abe-aaab-05304046d616, infoPort=39977, infoSecurePort=0, ipcPort=40489, storageInfo=lv=-57;cid=testClusterID;nsid=17149769;c=1733173971512), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:12:52,908 WARN [Thread-2245 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/cluster_d1106b91-8d14-61f9-5ce5-728fd048ffe2/dfs/data/data3/current/BP-1842846650-172.17.0.2-1733173971512/current, will proceed with Du for space computation calculation, 2024-12-02T21:12:52,909 WARN [Thread-2246 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/cluster_d1106b91-8d14-61f9-5ce5-728fd048ffe2/dfs/data/data4/current/BP-1842846650-172.17.0.2-1733173971512/current, will proceed with Du for space computation calculation, 2024-12-02T21:12:52,926 WARN [Thread-2221 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T21:12:52,928 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x43d1cdae2ab0c34a with lease ID 0x7e566f4a40561f5a: Processing first storage report for DS-2be056ff-97f9-49fd-8b1e-adcd9c7eee03 from datanode DatanodeRegistration(127.0.0.1:38953, datanodeUuid=9397b931-0a97-4534-b59b-d0524d24262a, infoPort=33561, infoSecurePort=0, ipcPort=34065, storageInfo=lv=-57;cid=testClusterID;nsid=17149769;c=1733173971512) 2024-12-02T21:12:52,928 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x43d1cdae2ab0c34a with lease ID 0x7e566f4a40561f5a: from storage DS-2be056ff-97f9-49fd-8b1e-adcd9c7eee03 node DatanodeRegistration(127.0.0.1:38953, datanodeUuid=9397b931-0a97-4534-b59b-d0524d24262a, infoPort=33561, infoSecurePort=0, ipcPort=34065, storageInfo=lv=-57;cid=testClusterID;nsid=17149769;c=1733173971512), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:12:52,928 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x43d1cdae2ab0c34a with lease ID 0x7e566f4a40561f5a: Processing first storage report for DS-9842f9df-f28b-4b04-b59b-a0b48b9c1add from datanode DatanodeRegistration(127.0.0.1:38953, datanodeUuid=9397b931-0a97-4534-b59b-d0524d24262a, infoPort=33561, infoSecurePort=0, ipcPort=34065, storageInfo=lv=-57;cid=testClusterID;nsid=17149769;c=1733173971512) 2024-12-02T21:12:52,928 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x43d1cdae2ab0c34a with lease ID 0x7e566f4a40561f5a: from storage DS-9842f9df-f28b-4b04-b59b-a0b48b9c1add node DatanodeRegistration(127.0.0.1:38953, datanodeUuid=9397b931-0a97-4534-b59b-d0524d24262a, infoPort=33561, infoSecurePort=0, ipcPort=34065, storageInfo=lv=-57;cid=testClusterID;nsid=17149769;c=1733173971512), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T21:12:52,948 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7 2024-12-02T21:12:52,953 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/cluster_d1106b91-8d14-61f9-5ce5-728fd048ffe2/zookeeper_0, clientPort=56969, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/cluster_d1106b91-8d14-61f9-5ce5-728fd048ffe2/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/cluster_d1106b91-8d14-61f9-5ce5-728fd048ffe2/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-02T21:12:52,955 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=56969 2024-12-02T21:12:52,955 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:12:52,957 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:12:52,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44483 is added to blk_1073741825_1001 (size=7) 2024-12-02T21:12:52,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38953 is added to blk_1073741825_1001 (size=7) 2024-12-02T21:12:52,967 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2 with version=8 2024-12-02T21:12:52,967 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:39905/user/jenkins/test-data/2cbae3b8-f761-787a-953c-0b58f120ed37/hbase-staging 2024-12-02T21:12:52,969 INFO [Time-limited test {}] client.ConnectionUtils(129): master/7d4f3b9a7081:0 server-side Connection retries=45 2024-12-02T21:12:52,970 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:12:52,970 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T21:12:52,970 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T21:12:52,970 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:12:52,970 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T21:12:52,970 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T21:12:52,970 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T21:12:52,971 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:45469 2024-12-02T21:12:52,971 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:12:52,973 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:12:52,976 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:45469 connecting to ZooKeeper ensemble=127.0.0.1:56969 2024-12-02T21:12:53,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:454690x0, quorum=127.0.0.1:56969, 
baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T21:12:53,030 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45469-0x101992db24c0000 connected 2024-12-02T21:12:53,095 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T21:12:53,096 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:12:53,097 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T21:12:53,098 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45469 2024-12-02T21:12:53,099 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45469 2024-12-02T21:12:53,102 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45469 2024-12-02T21:12:53,104 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45469 2024-12-02T21:12:53,104 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45469 2024-12-02T21:12:53,104 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2, hbase.cluster.distributed=false 2024-12-02T21:12:53,118 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/7d4f3b9a7081:0 server-side Connection retries=45 2024-12-02T21:12:53,118 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:12:53,118 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T21:12:53,118 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T21:12:53,118 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T21:12:53,118 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T21:12:53,118 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T21:12:53,118 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T21:12:53,119 INFO [Time-limited 
test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:45513 2024-12-02T21:12:53,119 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T21:12:53,120 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T21:12:53,120 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:12:53,122 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:12:53,123 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:45513 connecting to ZooKeeper ensemble=127.0.0.1:56969 2024-12-02T21:12:53,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:455130x0, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T21:12:53,129 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:455130x0, quorum=127.0.0.1:56969, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T21:12:53,129 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45513-0x101992db24c0001 connected 2024-12-02T21:12:53,129 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45513-0x101992db24c0001, quorum=127.0.0.1:56969, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:12:53,130 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45513-0x101992db24c0001, quorum=127.0.0.1:56969, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T21:12:53,130 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45513 2024-12-02T21:12:53,131 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45513 2024-12-02T21:12:53,133 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45513 2024-12-02T21:12:53,134 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45513 2024-12-02T21:12:53,136 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45513 2024-12-02T21:12:53,137 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/7d4f3b9a7081,45469,1733173972969 2024-12-02T21:12:53,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:12:53,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45513-0x101992db24c0001, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:12:53,145 DEBUG 
[master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7d4f3b9a7081,45469,1733173972969 2024-12-02T21:12:53,146 DEBUG [M:0;7d4f3b9a7081:45469 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7d4f3b9a7081:45469 2024-12-02T21:12:53,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T21:12:53,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45513-0x101992db24c0001, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T21:12:53,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:12:53,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45513-0x101992db24c0001, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:12:53,154 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T21:12:53,154 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7d4f3b9a7081,45469,1733173972969 from backup master directory 2024-12-02T21:12:53,154 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T21:12:53,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45513-0x101992db24c0001, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:12:53,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7d4f3b9a7081,45469,1733173972969 2024-12-02T21:12:53,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T21:12:53,162 WARN [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
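
Several entries above report "Set watcher on znode that does not yet exist, /hbase/master" followed later by a NodeCreated event: a process registers interest in the active-master znode before it exists, and ZooKeeper fires the watch once someone creates it. A minimal sketch of that watch-on-a-missing-znode pattern with the plain ZooKeeper client — hypothetical ensemble address and payload, not HBase's ZKWatcher/ZKUtil code:

import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class MasterZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    CountDownLatch created = new CountDownLatch(1);

    // Hypothetical ensemble; the test above uses 127.0.0.1:56969.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
      if (event.getType() == Watcher.Event.EventType.NodeCreated) {
        System.out.println("NodeCreated fired for " + event.getPath());
        created.countDown();
      }
    });
    connected.await();

    // Make sure the parent exists so the later create can succeed.
    if (zk.exists("/hbase", false) == null) {
      zk.create("/hbase", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    }

    // exists() happily leaves a watch on a path that is not there yet -- the
    // "Set watcher on znode that does not yet exist" case from the log.
    zk.exists("/hbase/master", true);

    // When an active master later creates the znode, the watch above fires once.
    zk.create("/hbase/master", "active-master".getBytes(),
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    created.await();
    zk.close();
  }
}
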
2024-12-02T21:12:53,162 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7d4f3b9a7081,45469,1733173972969 2024-12-02T21:12:53,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44483 is added to blk_1073741826_1002 (size=42) 2024-12-02T21:12:53,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38953 is added to blk_1073741826_1002 (size=42) 2024-12-02T21:12:53,172 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/hbase.id with ID: f0d9db9f-3b8b-4a09-9e60-390fe7e3265e 2024-12-02T21:12:53,184 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:12:53,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45513-0x101992db24c0001, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:12:53,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:12:53,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44483 is added to blk_1073741827_1003 (size=196) 2024-12-02T21:12:53,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38953 is added to blk_1073741827_1003 (size=196) 2024-12-02T21:12:53,202 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T21:12:53,203 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, 
flushIntervalMs=900000 2024-12-02T21:12:53,203 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:12:53,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44483 is added to blk_1073741828_1004 (size=1189) 2024-12-02T21:12:53,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38953 is added to blk_1073741828_1004 (size=1189) 2024-12-02T21:12:53,211 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/data/master/store 2024-12-02T21:12:53,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38953 is added to blk_1073741829_1005 (size=34) 2024-12-02T21:12:53,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44483 is added to blk_1073741829_1005 (size=34) 2024-12-02T21:12:53,218 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:12:53,218 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T21:12:53,218 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:12:53,218 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-02T21:12:53,218 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T21:12:53,218 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:12:53,218 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T21:12:53,218 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-02T21:12:53,219 WARN [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/data/master/store/.initializing 2024-12-02T21:12:53,219 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/WALs/7d4f3b9a7081,45469,1733173972969 2024-12-02T21:12:53,221 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7d4f3b9a7081%2C45469%2C1733173972969, suffix=, logDir=hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/WALs/7d4f3b9a7081,45469,1733173972969, archiveDir=hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/oldWALs, maxLogs=10 2024-12-02T21:12:53,221 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C45469%2C1733173972969.1733173973221 2024-12-02T21:12:53,228 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/WALs/7d4f3b9a7081,45469,1733173972969/7d4f3b9a7081%2C45469%2C1733173972969.1733173973221 2024-12-02T21:12:53,228 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39977:39977),(127.0.0.1/127.0.0.1:33561:33561)] 2024-12-02T21:12:53,228 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:12:53,229 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:12:53,229 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:12:53,229 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:12:53,230 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 
1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:12:53,231 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-02T21:12:53,231 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:12:53,231 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:12:53,231 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:12:53,232 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-02T21:12:53,232 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:12:53,233 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:12:53,233 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:12:53,234 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-02T21:12:53,234 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:12:53,234 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:12:53,234 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:12:53,235 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-02T21:12:53,235 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:12:53,235 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:12:53,236 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:12:53,236 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:12:53,238 DEBUG 
[master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T21:12:53,239 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T21:12:53,240 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:12:53,241 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=850819, jitterRate=0.08187301456928253}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T21:12:53,241 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-02T21:12:53,241 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-02T21:12:53,243 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@944960b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:12:53,244 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-02T21:12:53,244 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-02T21:12:53,244 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-02T21:12:53,244 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-02T21:12:53,245 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec 2024-12-02T21:12:53,245 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-12-02T21:12:53,245 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-02T21:12:53,248 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-02T21:12:53,249 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-02T21:12:53,253 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-02T21:12:53,253 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-02T21:12:53,254 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-02T21:12:53,261 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-02T21:12:53,262 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-02T21:12:53,263 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-02T21:12:53,270 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-02T21:12:53,270 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-02T21:12:53,278 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-02T21:12:53,280 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-02T21:12:53,286 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-02T21:12:53,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T21:12:53,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45513-0x101992db24c0001, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T21:12:53,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:12:53,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45513-0x101992db24c0001, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-02T21:12:53,295 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=7d4f3b9a7081,45469,1733173972969, sessionid=0x101992db24c0000, setting cluster-up flag (Was=false) 2024-12-02T21:12:53,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:12:53,312 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45513-0x101992db24c0001, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:12:53,336 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-02T21:12:53,337 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7d4f3b9a7081,45469,1733173972969 2024-12-02T21:12:53,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45513-0x101992db24c0001, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:12:53,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:12:53,378 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-02T21:12:53,379 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7d4f3b9a7081,45469,1733173972969 2024-12-02T21:12:53,381 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-02T21:12:53,382 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-02T21:12:53,382 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-02T21:12:53,382 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7d4f3b9a7081,45469,1733173972969 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-02T21:12:53,382 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7d4f3b9a7081:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:12:53,382 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7d4f3b9a7081:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:12:53,382 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:12:53,382 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=5, maxPoolSize=5 2024-12-02T21:12:53,382 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7d4f3b9a7081:0, corePoolSize=10, maxPoolSize=10 2024-12-02T21:12:53,382 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:12:53,382 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=2, maxPoolSize=2 2024-12-02T21:12:53,382 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:12:53,383 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733174003383 2024-12-02T21:12:53,383 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-02T21:12:53,383 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-02T21:12:53,383 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-02T21:12:53,383 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-02T21:12:53,383 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-02T21:12:53,383 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-02T21:12:53,383 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore 
name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T21:12:53,383 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:12:53,384 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-02T21:12:53,384 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-02T21:12:53,384 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-02T21:12:53,384 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-02T21:12:53,384 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-02T21:12:53,384 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-02T21:12:53,384 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.large.0-1733173973384,5,FailOnTimeoutGroup] 2024-12-02T21:12:53,384 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:12:53,384 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T21:12:53,385 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.small.0-1733173973384,5,FailOnTimeoutGroup] 2024-12-02T21:12:53,385 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-02T21:12:53,385 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-02T21:12:53,385 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-02T21:12:53,385 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-02T21:12:53,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38953 is added to blk_1073741831_1007 (size=1039) 2024-12-02T21:12:53,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44483 is added to blk_1073741831_1007 (size=1039) 2024-12-02T21:12:53,391 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-02T21:12:53,391 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2 2024-12-02T21:12:53,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38953 is added to blk_1073741832_1008 (size=32) 2024-12-02T21:12:53,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44483 is added to blk_1073741832_1008 (size=32) 2024-12-02T21:12:53,398 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:12:53,399 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T21:12:53,401 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T21:12:53,401 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:12:53,401 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:12:53,401 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T21:12:53,402 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T21:12:53,402 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:12:53,403 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:12:53,403 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T21:12:53,404 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality 
to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T21:12:53,404 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:12:53,404 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:12:53,405 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/data/hbase/meta/1588230740 2024-12-02T21:12:53,405 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/data/hbase/meta/1588230740 2024-12-02T21:12:53,407 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T21:12:53,407 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-02T21:12:53,409 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:12:53,410 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=823248, jitterRate=0.04681403934955597}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T21:12:53,410 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-02T21:12:53,410 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-02T21:12:53,410 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-02T21:12:53,410 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-02T21:12:53,410 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T21:12:53,410 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T21:12:53,411 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-02T21:12:53,411 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-02T21:12:53,411 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-02T21:12:53,411 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-02T21:12:53,412 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-02T21:12:53,412 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T21:12:53,413 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-02T21:12:53,447 DEBUG [RS:0;7d4f3b9a7081:45513 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7d4f3b9a7081:45513 2024-12-02T21:12:53,448 INFO [RS:0;7d4f3b9a7081:45513 {}] regionserver.HRegionServer(1008): ClusterId : f0d9db9f-3b8b-4a09-9e60-390fe7e3265e 2024-12-02T21:12:53,448 DEBUG [RS:0;7d4f3b9a7081:45513 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T21:12:53,454 DEBUG [RS:0;7d4f3b9a7081:45513 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T21:12:53,454 DEBUG [RS:0;7d4f3b9a7081:45513 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T21:12:53,462 DEBUG [RS:0;7d4f3b9a7081:45513 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T21:12:53,462 DEBUG [RS:0;7d4f3b9a7081:45513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5af6cdfe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:12:53,463 DEBUG [RS:0;7d4f3b9a7081:45513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bbe902c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7d4f3b9a7081/172.17.0.2:0 2024-12-02T21:12:53,463 INFO [RS:0;7d4f3b9a7081:45513 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-02T21:12:53,463 INFO [RS:0;7d4f3b9a7081:45513 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-02T21:12:53,463 DEBUG [RS:0;7d4f3b9a7081:45513 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-02T21:12:53,464 INFO [RS:0;7d4f3b9a7081:45513 {}] regionserver.HRegionServer(3073): reportForDuty to master=7d4f3b9a7081,45469,1733173972969 with isa=7d4f3b9a7081/172.17.0.2:45513, startcode=1733173973117 2024-12-02T21:12:53,464 DEBUG [RS:0;7d4f3b9a7081:45513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T21:12:53,465 INFO [RS-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34433, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T21:12:53,466 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45469 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 7d4f3b9a7081,45513,1733173973117 2024-12-02T21:12:53,466 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45469 {}] master.ServerManager(486): Registering regionserver=7d4f3b9a7081,45513,1733173973117 2024-12-02T21:12:53,467 DEBUG [RS:0;7d4f3b9a7081:45513 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2 2024-12-02T21:12:53,467 DEBUG [RS:0;7d4f3b9a7081:45513 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:41797 2024-12-02T21:12:53,467 DEBUG [RS:0;7d4f3b9a7081:45513 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-02T21:12:53,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T21:12:53,478 DEBUG [RS:0;7d4f3b9a7081:45513 {}] zookeeper.ZKUtil(111): regionserver:45513-0x101992db24c0001, quorum=127.0.0.1:56969, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7d4f3b9a7081,45513,1733173973117 2024-12-02T21:12:53,478 WARN [RS:0;7d4f3b9a7081:45513 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-02T21:12:53,478 INFO [RS:0;7d4f3b9a7081:45513 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:12:53,478 DEBUG [RS:0;7d4f3b9a7081:45513 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/WALs/7d4f3b9a7081,45513,1733173973117 2024-12-02T21:12:53,479 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7d4f3b9a7081,45513,1733173973117] 2024-12-02T21:12:53,481 DEBUG [RS:0;7d4f3b9a7081:45513 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-02T21:12:53,482 INFO [RS:0;7d4f3b9a7081:45513 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T21:12:53,483 INFO [RS:0;7d4f3b9a7081:45513 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T21:12:53,483 INFO [RS:0;7d4f3b9a7081:45513 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T21:12:53,483 INFO [RS:0;7d4f3b9a7081:45513 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:12:53,483 INFO [RS:0;7d4f3b9a7081:45513 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-02T21:12:53,484 INFO [RS:0;7d4f3b9a7081:45513 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-02T21:12:53,484 DEBUG [RS:0;7d4f3b9a7081:45513 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:12:53,484 DEBUG [RS:0;7d4f3b9a7081:45513 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:12:53,484 DEBUG [RS:0;7d4f3b9a7081:45513 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:12:53,484 DEBUG [RS:0;7d4f3b9a7081:45513 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:12:53,484 DEBUG [RS:0;7d4f3b9a7081:45513 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:12:53,484 DEBUG [RS:0;7d4f3b9a7081:45513 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7d4f3b9a7081:0, corePoolSize=2, maxPoolSize=2 2024-12-02T21:12:53,485 DEBUG [RS:0;7d4f3b9a7081:45513 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:12:53,485 DEBUG [RS:0;7d4f3b9a7081:45513 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:12:53,485 DEBUG [RS:0;7d4f3b9a7081:45513 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:12:53,485 DEBUG [RS:0;7d4f3b9a7081:45513 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:12:53,485 DEBUG [RS:0;7d4f3b9a7081:45513 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7d4f3b9a7081:0, corePoolSize=1, maxPoolSize=1 2024-12-02T21:12:53,485 DEBUG [RS:0;7d4f3b9a7081:45513 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7d4f3b9a7081:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:12:53,485 DEBUG [RS:0;7d4f3b9a7081:45513 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7d4f3b9a7081:0, corePoolSize=3, maxPoolSize=3 2024-12-02T21:12:53,485 INFO [RS:0;7d4f3b9a7081:45513 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T21:12:53,485 INFO [RS:0;7d4f3b9a7081:45513 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T21:12:53,485 INFO [RS:0;7d4f3b9a7081:45513 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T21:12:53,485 INFO [RS:0;7d4f3b9a7081:45513 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T21:12:53,485 INFO [RS:0;7d4f3b9a7081:45513 {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,45513,1733173973117-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-02T21:12:53,503 INFO [RS:0;7d4f3b9a7081:45513 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T21:12:53,503 INFO [RS:0;7d4f3b9a7081:45513 {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,45513,1733173973117-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:12:53,519 INFO [RS:0;7d4f3b9a7081:45513 {}] regionserver.Replication(204): 7d4f3b9a7081,45513,1733173973117 started 2024-12-02T21:12:53,519 INFO [RS:0;7d4f3b9a7081:45513 {}] regionserver.HRegionServer(1767): Serving as 7d4f3b9a7081,45513,1733173973117, RpcServer on 7d4f3b9a7081/172.17.0.2:45513, sessionid=0x101992db24c0001 2024-12-02T21:12:53,520 DEBUG [RS:0;7d4f3b9a7081:45513 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T21:12:53,520 DEBUG [RS:0;7d4f3b9a7081:45513 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7d4f3b9a7081,45513,1733173973117 2024-12-02T21:12:53,520 DEBUG [RS:0;7d4f3b9a7081:45513 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7d4f3b9a7081,45513,1733173973117' 2024-12-02T21:12:53,520 DEBUG [RS:0;7d4f3b9a7081:45513 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T21:12:53,520 DEBUG [RS:0;7d4f3b9a7081:45513 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T21:12:53,521 DEBUG [RS:0;7d4f3b9a7081:45513 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T21:12:53,521 DEBUG [RS:0;7d4f3b9a7081:45513 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T21:12:53,521 DEBUG [RS:0;7d4f3b9a7081:45513 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7d4f3b9a7081,45513,1733173973117 2024-12-02T21:12:53,521 DEBUG [RS:0;7d4f3b9a7081:45513 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7d4f3b9a7081,45513,1733173973117' 2024-12-02T21:12:53,521 DEBUG [RS:0;7d4f3b9a7081:45513 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T21:12:53,521 DEBUG [RS:0;7d4f3b9a7081:45513 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T21:12:53,521 DEBUG [RS:0;7d4f3b9a7081:45513 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T21:12:53,521 INFO [RS:0;7d4f3b9a7081:45513 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T21:12:53,521 INFO [RS:0;7d4f3b9a7081:45513 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-02T21:12:53,541 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:53,563 WARN [7d4f3b9a7081:45469 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 2024-12-02T21:12:53,624 INFO [RS:0;7d4f3b9a7081:45513 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7d4f3b9a7081%2C45513%2C1733173973117, suffix=, logDir=hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/WALs/7d4f3b9a7081,45513,1733173973117, archiveDir=hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/oldWALs, maxLogs=32 2024-12-02T21:12:53,625 INFO [RS:0;7d4f3b9a7081:45513 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C45513%2C1733173973117.1733173973624 2024-12-02T21:12:53,633 INFO [RS:0;7d4f3b9a7081:45513 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/WALs/7d4f3b9a7081,45513,1733173973117/7d4f3b9a7081%2C45513%2C1733173973117.1733173973624 2024-12-02T21:12:53,633 DEBUG [RS:0;7d4f3b9a7081:45513 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39977:39977),(127.0.0.1/127.0.0.1:33561:33561)] 2024-12-02T21:12:53,813 DEBUG [7d4f3b9a7081:45469 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-02T21:12:53,814 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7d4f3b9a7081,45513,1733173973117 2024-12-02T21:12:53,816 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7d4f3b9a7081,45513,1733173973117, state=OPENING 2024-12-02T21:12:53,887 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-02T21:12:53,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45469-0x101992db24c0000, 
quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:12:53,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45513-0x101992db24c0001, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:12:53,897 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=7d4f3b9a7081,45513,1733173973117}] 2024-12-02T21:12:53,897 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:12:53,897 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:12:54,008 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-02T21:12:54,052 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7d4f3b9a7081,45513,1733173973117 2024-12-02T21:12:54,053 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T21:12:54,057 INFO [RS-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35462, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T21:12:54,062 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-02T21:12:54,062 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:12:54,064 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7d4f3b9a7081%2C45513%2C1733173973117.meta, suffix=.meta, logDir=hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/WALs/7d4f3b9a7081,45513,1733173973117, archiveDir=hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/oldWALs, maxLogs=32 2024-12-02T21:12:54,065 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7d4f3b9a7081%2C45513%2C1733173973117.meta.1733173974065.meta 2024-12-02T21:12:54,076 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/WALs/7d4f3b9a7081,45513,1733173973117/7d4f3b9a7081%2C45513%2C1733173973117.meta.1733173974065.meta 2024-12-02T21:12:54,076 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33561:33561),(127.0.0.1/127.0.0.1:39977:39977)] 2024-12-02T21:12:54,076 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:12:54,076 
DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-02T21:12:54,076 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-02T21:12:54,077 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-02T21:12:54,077 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-02T21:12:54,077 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:12:54,077 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-02T21:12:54,077 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-02T21:12:54,078 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T21:12:54,079 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T21:12:54,079 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:12:54,080 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:12:54,080 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T21:12:54,080 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T21:12:54,080 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:12:54,081 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:12:54,081 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T21:12:54,081 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T21:12:54,082 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:12:54,082 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T21:12:54,082 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/data/hbase/meta/1588230740 2024-12-02T21:12:54,084 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/data/hbase/meta/1588230740 2024-12-02T21:12:54,085 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta 
descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T21:12:54,086 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-02T21:12:54,087 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=783564, jitterRate=-0.003647327423095703}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T21:12:54,087 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-02T21:12:54,087 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733173974052 2024-12-02T21:12:54,089 DEBUG [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-02T21:12:54,089 INFO [RS_OPEN_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-02T21:12:54,089 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7d4f3b9a7081,45513,1733173973117 2024-12-02T21:12:54,090 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7d4f3b9a7081,45513,1733173973117, state=OPEN 2024-12-02T21:12:54,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T21:12:54,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45513-0x101992db24c0001, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T21:12:54,120 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:12:54,120 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T21:12:54,122 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-02T21:12:54,122 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=7d4f3b9a7081,45513,1733173973117 in 223 msec 2024-12-02T21:12:54,124 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-02T21:12:54,124 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 711 msec 2024-12-02T21:12:54,125 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 743 msec 2024-12-02T21:12:54,125 INFO 
[master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733173974125, completionTime=-1 2024-12-02T21:12:54,126 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-02T21:12:54,126 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-02T21:12:54,126 DEBUG [hconnection-0x9cbd7f7-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T21:12:54,127 INFO [RS-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35472, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T21:12:54,128 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-02T21:12:54,128 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733174034128 2024-12-02T21:12:54,128 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733174094128 2024-12-02T21:12:54,128 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 2 msec 2024-12-02T21:12:54,153 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,45469,1733173972969-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T21:12:54,154 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,45469,1733173972969-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:12:54,154 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,45469,1733173972969-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:12:54,154 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7d4f3b9a7081:45469, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T21:12:54,154 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-02T21:12:54,154 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
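The "Chore ScheduledChore name=..., period=..., unit=... is enabled" records just above come from HBase's ChoreService registering periodic background tasks (balancer, catalog janitor, HBCK chore, and so on). As a rough, hypothetical sketch of how such a chore is defined and scheduled -- the class name, chore name, and period below are illustrative and not taken from this test, and the exact constructor signatures should be checked against the HBase version in use:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) {
        // minimal stop flag the chore framework consults between runs
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("demo");   // thread-pool name prefix is arbitrary
        ScheduledChore chore = new ScheduledChore("DemoChore", stopper, 60000) {
          @Override protected void chore() {
            // periodic work goes here; runs every 60000 ms, like the chores logged above
          }
        };
        service.scheduleChore(chore);
        // ... later, on shutdown: service.shutdown();
      }
    }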
2024-12-02T21:12:54,154 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T21:12:54,155 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-02T21:12:54,156 DEBUG [master/7d4f3b9a7081:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-02T21:12:54,156 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T21:12:54,156 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:12:54,157 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T21:12:54,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38953 is added to blk_1073741835_1011 (size=358) 2024-12-02T21:12:54,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44483 is added to blk_1073741835_1011 (size=358) 2024-12-02T21:12:54,164 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 8bc7340229051f9fa244ac523d27f16c, NAME => 'hbase:namespace,,1733173974154.8bc7340229051f9fa244ac523d27f16c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2 2024-12-02T21:12:54,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44483 is added to blk_1073741836_1012 (size=42) 2024-12-02T21:12:54,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38953 is added to blk_1073741836_1012 (size=42) 2024-12-02T21:12:54,171 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733173974154.8bc7340229051f9fa244ac523d27f16c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:12:54,171 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 8bc7340229051f9fa244ac523d27f16c, disabling compactions & flushes 2024-12-02T21:12:54,171 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region 
hbase:namespace,,1733173974154.8bc7340229051f9fa244ac523d27f16c. 2024-12-02T21:12:54,171 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733173974154.8bc7340229051f9fa244ac523d27f16c. 2024-12-02T21:12:54,171 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733173974154.8bc7340229051f9fa244ac523d27f16c. after waiting 0 ms 2024-12-02T21:12:54,171 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733173974154.8bc7340229051f9fa244ac523d27f16c. 2024-12-02T21:12:54,171 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733173974154.8bc7340229051f9fa244ac523d27f16c. 2024-12-02T21:12:54,171 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 8bc7340229051f9fa244ac523d27f16c: 2024-12-02T21:12:54,172 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T21:12:54,172 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733173974154.8bc7340229051f9fa244ac523d27f16c.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733173974172"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733173974172"}]},"ts":"1733173974172"} 2024-12-02T21:12:54,174 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-02T21:12:54,174 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T21:12:54,174 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733173974174"}]},"ts":"1733173974174"} 2024-12-02T21:12:54,175 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-02T21:12:54,195 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=8bc7340229051f9fa244ac523d27f16c, ASSIGN}] 2024-12-02T21:12:54,196 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=8bc7340229051f9fa244ac523d27f16c, ASSIGN 2024-12-02T21:12:54,197 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=8bc7340229051f9fa244ac523d27f16c, ASSIGN; state=OFFLINE, location=7d4f3b9a7081,45513,1733173973117; forceNewPlan=false, retain=false 2024-12-02T21:12:54,348 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=8bc7340229051f9fa244ac523d27f16c, regionState=OPENING, regionLocation=7d4f3b9a7081,45513,1733173973117 2024-12-02T21:12:54,352 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): 
Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 8bc7340229051f9fa244ac523d27f16c, server=7d4f3b9a7081,45513,1733173973117}] 2024-12-02T21:12:54,507 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7d4f3b9a7081,45513,1733173973117 2024-12-02T21:12:54,515 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733173974154.8bc7340229051f9fa244ac523d27f16c. 2024-12-02T21:12:54,515 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 8bc7340229051f9fa244ac523d27f16c, NAME => 'hbase:namespace,,1733173974154.8bc7340229051f9fa244ac523d27f16c.', STARTKEY => '', ENDKEY => ''} 2024-12-02T21:12:54,515 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 8bc7340229051f9fa244ac523d27f16c 2024-12-02T21:12:54,515 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733173974154.8bc7340229051f9fa244ac523d27f16c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T21:12:54,515 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 8bc7340229051f9fa244ac523d27f16c 2024-12-02T21:12:54,516 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 8bc7340229051f9fa244ac523d27f16c 2024-12-02T21:12:54,518 INFO [StoreOpener-8bc7340229051f9fa244ac523d27f16c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 8bc7340229051f9fa244ac523d27f16c 2024-12-02T21:12:54,520 INFO [StoreOpener-8bc7340229051f9fa244ac523d27f16c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8bc7340229051f9fa244ac523d27f16c columnFamilyName info 2024-12-02T21:12:54,520 DEBUG [StoreOpener-8bc7340229051f9fa244ac523d27f16c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T21:12:54,521 INFO [StoreOpener-8bc7340229051f9fa244ac523d27f16c-1 {}] regionserver.HStore(327): Store=8bc7340229051f9fa244ac523d27f16c/info, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T21:12:54,522 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/data/hbase/namespace/8bc7340229051f9fa244ac523d27f16c 2024-12-02T21:12:54,523 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/data/hbase/namespace/8bc7340229051f9fa244ac523d27f16c 2024-12-02T21:12:54,524 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 8bc7340229051f9fa244ac523d27f16c 2024-12-02T21:12:54,526 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/data/hbase/namespace/8bc7340229051f9fa244ac523d27f16c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T21:12:54,527 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 8bc7340229051f9fa244ac523d27f16c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=875892, jitterRate=0.11375506222248077}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T21:12:54,527 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 8bc7340229051f9fa244ac523d27f16c: 2024-12-02T21:12:54,528 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733173974154.8bc7340229051f9fa244ac523d27f16c., pid=6, masterSystemTime=1733173974507 2024-12-02T21:12:54,529 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733173974154.8bc7340229051f9fa244ac523d27f16c. 2024-12-02T21:12:54,529 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733173974154.8bc7340229051f9fa244ac523d27f16c. 
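The CreateTableProcedure and region-open records above show the master materialising 'hbase:namespace' with a single 'info' family (VERSIONS => '10', IN_MEMORY => 'true', BLOCKSIZE => 8192). For orientation only, a minimal client-side equivalent using the public Admin API might look like the sketch below; the table name and connection handling are made up for illustration and are not part of this test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("demo_namespace_like"))   // illustrative name
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(10)    // VERSIONS => '10'
                  .setInMemory(true)     // IN_MEMORY => 'true'
                  .setBlocksize(8192)    // BLOCKSIZE => '8192 B (8KB)'
                  .build())
              .build());
        }
      }
    }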
2024-12-02T21:12:54,529 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=8bc7340229051f9fa244ac523d27f16c, regionState=OPEN, openSeqNum=2, regionLocation=7d4f3b9a7081,45513,1733173973117 2024-12-02T21:12:54,532 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-02T21:12:54,532 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 8bc7340229051f9fa244ac523d27f16c, server=7d4f3b9a7081,45513,1733173973117 in 179 msec 2024-12-02T21:12:54,534 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-02T21:12:54,534 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=8bc7340229051f9fa244ac523d27f16c, ASSIGN in 337 msec 2024-12-02T21:12:54,534 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T21:12:54,534 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733173974534"}]},"ts":"1733173974534"} 2024-12-02T21:12:54,535 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-02T21:12:54,541 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40413/user/jenkins/test-data/ab232932-2c1d-beeb-6dac-8160f7a17be7/WALs/7d4f3b9a7081,46239,1733173707323/7d4f3b9a7081%2C46239%2C1733173707323.meta.1733173708307.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T21:12:54,546 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T21:12:54,547 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 392 msec 2024-12-02T21:12:54,557 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-02T21:12:54,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-02T21:12:54,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:12:54,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45513-0x101992db24c0001, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:12:54,567 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-02T21:12:54,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-02T21:12:54,599 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 31 msec 2024-12-02T21:12:54,610 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-02T21:12:54,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-02T21:12:54,639 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 28 msec 2024-12-02T21:12:54,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-02T21:12:54,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-02T21:12:54,678 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed 
initialization 1.516sec 2024-12-02T21:12:54,678 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-02T21:12:54,678 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-02T21:12:54,678 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-02T21:12:54,678 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-02T21:12:54,679 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-02T21:12:54,679 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,45469,1733173972969-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T21:12:54,679 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,45469,1733173972969-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-02T21:12:54,680 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-02T21:12:54,680 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-02T21:12:54,680 INFO [master/7d4f3b9a7081:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7d4f3b9a7081,45469,1733173972969-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
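The two "Failed invocation for ... isFileClosed" stack traces earlier in this section appear to be leftover lease-recovery work from a previously shut-down mini cluster: RecoverLeaseFSUtils calls DistributedFileSystem.isFileClosed() through reflection while recovering a WAL lease, and the call fails with "Filesystem closed" because that DFS client is already gone. The following is only a rough sketch of that style of reflective probe, not the actual HBase implementation:

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class IsFileClosedProbe {
      // Returns true only when the filesystem positively reports the file as closed;
      // any reflective failure is treated as "not known to be closed".
      static boolean isFileClosed(FileSystem fs, Path path) {
        try {
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, path);
        } catch (InvocationTargetException e) {
          // the wrapped IOException ("Filesystem closed" in the traces above) lands here
          return false;
        } catch (ReflectiveOperationException e) {
          return false; // isFileClosed not available on this FileSystem implementation
        }
      }
    }

Because the probe only ever downgrades a failure to "retry later", these WARN traces do not by themselves indicate data loss in the test.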
2024-12-02T21:12:54,739 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2324cfc6 to 127.0.0.1:56969 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3307832b 2024-12-02T21:12:54,746 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f3373ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T21:12:54,747 DEBUG [hconnection-0x3491b5d-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T21:12:54,749 INFO [RS-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35474, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T21:12:54,750 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=7d4f3b9a7081,45469,1733173972969 2024-12-02T21:12:54,750 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T21:12:54,752 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-02T21:12:54,752 INFO [Time-limited test {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T21:12:54,754 INFO [Time-limited test {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/WALs/test.com,8080,1, archiveDir=hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/oldWALs, maxLogs=32 2024-12-02T21:12:54,755 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733173974755 2024-12-02T21:12:54,759 INFO [Time-limited test {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/WALs/test.com,8080,1/test.com%2C8080%2C1.1733173974755 2024-12-02T21:12:54,759 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33561:33561),(127.0.0.1/127.0.0.1:39977:39977)] 2024-12-02T21:12:54,759 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733173974759 2024-12-02T21:12:54,765 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/WALs/test.com,8080,1/test.com%2C8080%2C1.1733173974755 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/WALs/test.com,8080,1/test.com%2C8080%2C1.1733173974759 2024-12-02T21:12:54,765 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39977:39977),(127.0.0.1/127.0.0.1:33561:33561)] 2024-12-02T21:12:54,765 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/WALs/test.com,8080,1/test.com%2C8080%2C1.1733173974755 is not closed yet, will try archiving it next time 2024-12-02T21:12:54,765 DEBUG [WAL-Shutdown-0 {}] 
wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/WALs/test.com,8080,1 2024-12-02T21:12:54,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44483 is added to blk_1073741837_1013 (size=93) 2024-12-02T21:12:54,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38953 is added to blk_1073741837_1013 (size=93) 2024-12-02T21:12:54,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44483 is added to blk_1073741838_1014 (size=93) 2024-12-02T21:12:54,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38953 is added to blk_1073741838_1014 (size=93) 2024-12-02T21:12:54,768 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/WALs/test.com,8080,1/test.com%2C8080%2C1.1733173974755 to hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/oldWALs/test.com%2C8080%2C1.1733173974755 2024-12-02T21:12:54,769 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/oldWALs 2024-12-02T21:12:54,769 INFO [Time-limited test {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1733173974759) 2024-12-02T21:12:54,770 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-02T21:12:54,770 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2324cfc6 to 127.0.0.1:56969 2024-12-02T21:12:54,770 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:12:54,770 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-02T21:12:54,770 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=883926369, stopped=false 2024-12-02T21:12:54,770 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=7d4f3b9a7081,45469,1733173972969 2024-12-02T21:12:54,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T21:12:54,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45513-0x101992db24c0001, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T21:12:54,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:12:54,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45513-0x101992db24c0001, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T21:12:54,787 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-02T21:12:54,787 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45513-0x101992db24c0001, quorum=127.0.0.1:56969, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 
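The "Minicluster is up" and "Shutting down minicluster" records bracket the actual test body. As a minimal sketch of the HBaseTestingUtility lifecycle that produces this kind of log -- the table name and contents below are invented for illustration, not taken from TestLogRolling:

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        util.startMiniCluster();                 // starts ZooKeeper, HDFS and HBase in-process
        try {
          Table table = util.createTable(TableName.valueOf("demo"), Bytes.toBytes("info"));
          // ... the test body would write data and exercise WAL rolling here ...
          table.close();
        } finally {
          util.shutdownMiniCluster();            // produces shutdown records like those that follow
        }
      }
    }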
2024-12-02T21:12:54,787 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:12:54,787 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T21:12:54,787 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7d4f3b9a7081,45513,1733173973117' ***** 2024-12-02T21:12:54,787 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-02T21:12:54,788 INFO [RS:0;7d4f3b9a7081:45513 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T21:12:54,788 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-02T21:12:54,788 INFO [RS:0;7d4f3b9a7081:45513 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T21:12:54,788 INFO [RS:0;7d4f3b9a7081:45513 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T21:12:54,788 INFO [RS:0;7d4f3b9a7081:45513 {}] regionserver.HRegionServer(3579): Received CLOSE for 8bc7340229051f9fa244ac523d27f16c 2024-12-02T21:12:54,788 INFO [RS:0;7d4f3b9a7081:45513 {}] regionserver.HRegionServer(1224): stopping server 7d4f3b9a7081,45513,1733173973117 2024-12-02T21:12:54,788 DEBUG [RS:0;7d4f3b9a7081:45513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T21:12:54,788 INFO [RS:0;7d4f3b9a7081:45513 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T21:12:54,788 INFO [RS:0;7d4f3b9a7081:45513 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T21:12:54,788 INFO [RS:0;7d4f3b9a7081:45513 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-02T21:12:54,788 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 8bc7340229051f9fa244ac523d27f16c, disabling compactions & flushes 2024-12-02T21:12:54,788 INFO [RS:0;7d4f3b9a7081:45513 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-02T21:12:54,788 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733173974154.8bc7340229051f9fa244ac523d27f16c. 2024-12-02T21:12:54,788 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733173974154.8bc7340229051f9fa244ac523d27f16c. 2024-12-02T21:12:54,788 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733173974154.8bc7340229051f9fa244ac523d27f16c. after waiting 0 ms 2024-12-02T21:12:54,788 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733173974154.8bc7340229051f9fa244ac523d27f16c. 
2024-12-02T21:12:54,789 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 8bc7340229051f9fa244ac523d27f16c 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-02T21:12:54,789 INFO [RS:0;7d4f3b9a7081:45513 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-02T21:12:54,789 DEBUG [RS:0;7d4f3b9a7081:45513 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, 8bc7340229051f9fa244ac523d27f16c=hbase:namespace,,1733173974154.8bc7340229051f9fa244ac523d27f16c.} 2024-12-02T21:12:54,789 DEBUG [RS:0;7d4f3b9a7081:45513 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 8bc7340229051f9fa244ac523d27f16c 2024-12-02T21:12:54,789 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-02T21:12:54,789 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-02T21:12:54,789 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-02T21:12:54,789 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T21:12:54,789 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T21:12:54,789 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=1.23 KB heapSize=2.87 KB 2024-12-02T21:12:54,809 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/data/hbase/namespace/8bc7340229051f9fa244ac523d27f16c/.tmp/info/5a5cb70d884243c68ea47cca63b34650 is 45, key is default/info:d/1733173974574/Put/seqid=0 2024-12-02T21:12:54,809 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/data/hbase/meta/1588230740/.tmp/info/8fd5896bb7de4e95bd84bf19c89995bf is 143, key is hbase:namespace,,1733173974154.8bc7340229051f9fa244ac523d27f16c./info:regioninfo/1733173974529/Put/seqid=0 2024-12-02T21:12:54,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44483 is added to blk_1073741840_1016 (size=6595) 2024-12-02T21:12:54,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38953 is added to blk_1073741840_1016 (size=6595) 2024-12-02T21:12:54,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38953 is added to blk_1073741839_1015 (size=5037) 2024-12-02T21:12:54,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44483 is added to blk_1073741839_1015 (size=5037) 2024-12-02T21:12:54,817 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.14 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/data/hbase/meta/1588230740/.tmp/info/8fd5896bb7de4e95bd84bf19c89995bf 2024-12-02T21:12:54,817 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/data/hbase/namespace/8bc7340229051f9fa244ac523d27f16c/.tmp/info/5a5cb70d884243c68ea47cca63b34650 2024-12-02T21:12:54,822 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/data/hbase/namespace/8bc7340229051f9fa244ac523d27f16c/.tmp/info/5a5cb70d884243c68ea47cca63b34650 as hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/data/hbase/namespace/8bc7340229051f9fa244ac523d27f16c/info/5a5cb70d884243c68ea47cca63b34650 2024-12-02T21:12:54,827 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/data/hbase/namespace/8bc7340229051f9fa244ac523d27f16c/info/5a5cb70d884243c68ea47cca63b34650, entries=2, sequenceid=6, filesize=4.9 K 2024-12-02T21:12:54,828 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 8bc7340229051f9fa244ac523d27f16c in 39ms, sequenceid=6, compaction requested=false 2024-12-02T21:12:54,828 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-02T21:12:54,832 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/data/hbase/namespace/8bc7340229051f9fa244ac523d27f16c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T21:12:54,833 INFO [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733173974154.8bc7340229051f9fa244ac523d27f16c. 2024-12-02T21:12:54,833 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 8bc7340229051f9fa244ac523d27f16c: 2024-12-02T21:12:54,833 DEBUG [RS_CLOSE_REGION-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733173974154.8bc7340229051f9fa244ac523d27f16c. 
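The flush records above (memstore data written to a .tmp HFile, then committed under the column-family directory, then a recovered.edits seqid marker) show the close-time flush path. Broadly the same flush machinery can be invoked on demand from a client; a minimal, hypothetical example via the Admin API:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // asks the region servers to persist the table's memstores as HFiles,
          // analogous to the close-time flush of hbase:namespace logged above
          admin.flush(TableName.valueOf("hbase:namespace"));
        }
      }
    }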
2024-12-02T21:12:54,836 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/data/hbase/meta/1588230740/.tmp/table/8248092672b44decbd57d3e88d0da5ad is 51, key is hbase:namespace/table:state/1733173974534/Put/seqid=0
2024-12-02T21:12:54,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44483 is added to blk_1073741841_1017 (size=5242)
2024-12-02T21:12:54,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38953 is added to blk_1073741841_1017 (size=5242)
2024-12-02T21:12:54,841 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=94 B at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/data/hbase/meta/1588230740/.tmp/table/8248092672b44decbd57d3e88d0da5ad
2024-12-02T21:12:54,846 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/data/hbase/meta/1588230740/.tmp/info/8fd5896bb7de4e95bd84bf19c89995bf as hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/data/hbase/meta/1588230740/info/8fd5896bb7de4e95bd84bf19c89995bf
2024-12-02T21:12:54,851 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/data/hbase/meta/1588230740/info/8fd5896bb7de4e95bd84bf19c89995bf, entries=10, sequenceid=9, filesize=6.4 K
2024-12-02T21:12:54,852 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/data/hbase/meta/1588230740/.tmp/table/8248092672b44decbd57d3e88d0da5ad as hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/data/hbase/meta/1588230740/table/8248092672b44decbd57d3e88d0da5ad
2024-12-02T21:12:54,857 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/data/hbase/meta/1588230740/table/8248092672b44decbd57d3e88d0da5ad, entries=2, sequenceid=9, filesize=5.1 K
2024-12-02T21:12:54,858 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~1.23 KB/1264, heapSize ~2.59 KB/2648, currentSize=0 B/0 for 1588230740 in 69ms, sequenceid=9, compaction requested=false
2024-12-02T21:12:54,858 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-12-02T21:12:54,862 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/data/hbase/meta/1588230740/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=1
2024-12-02T21:12:54,863 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-02T21:12:54,863 INFO [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-02T21:12:54,863 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-02T21:12:54,863 DEBUG [RS_CLOSE_META-regionserver/7d4f3b9a7081:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-02T21:12:54,989 INFO [RS:0;7d4f3b9a7081:45513 {}] regionserver.HRegionServer(1250): stopping server 7d4f3b9a7081,45513,1733173973117; all regions closed.
2024-12-02T21:12:54,989 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/WALs/7d4f3b9a7081,45513,1733173973117
2024-12-02T21:12:54,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44483 is added to blk_1073741834_1010 (size=2484)
2024-12-02T21:12:54,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38953 is added to blk_1073741834_1010 (size=2484)
2024-12-02T21:12:54,996 DEBUG [RS:0;7d4f3b9a7081:45513 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/oldWALs
2024-12-02T21:12:54,996 INFO [RS:0;7d4f3b9a7081:45513 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 7d4f3b9a7081%2C45513%2C1733173973117.meta:.meta(num 1733173974065)
2024-12-02T21:12:54,996 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/WALs/7d4f3b9a7081,45513,1733173973117
2024-12-02T21:12:54,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38953 is added to blk_1073741833_1009 (size=1414)
2024-12-02T21:12:55,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44483 is added to blk_1073741833_1009 (size=1414)
2024-12-02T21:12:55,002 DEBUG [RS:0;7d4f3b9a7081:45513 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/oldWALs
2024-12-02T21:12:55,002 INFO [RS:0;7d4f3b9a7081:45513 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 7d4f3b9a7081%2C45513%2C1733173973117:(num 1733173973624)
2024-12-02T21:12:55,002 DEBUG [RS:0;7d4f3b9a7081:45513 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-02T21:12:55,002 INFO [RS:0;7d4f3b9a7081:45513 {}] regionserver.LeaseManager(133): Closed leases
2024-12-02T21:12:55,003 INFO [RS:0;7d4f3b9a7081:45513 {}] hbase.ChoreService(370): Chore service for: regionserver/7d4f3b9a7081:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-12-02T21:12:55,003 INFO [regionserver/7d4f3b9a7081:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-02T21:12:55,003 INFO [RS:0;7d4f3b9a7081:45513 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:45513
2024-12-02T21:12:55,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-02T21:12:55,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45513-0x101992db24c0001, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7d4f3b9a7081,45513,1733173973117
2024-12-02T21:12:55,020 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7d4f3b9a7081,45513,1733173973117]
2024-12-02T21:12:55,020 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 7d4f3b9a7081,45513,1733173973117; numProcessing=1
2024-12-02T21:12:55,028 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/7d4f3b9a7081,45513,1733173973117 already deleted, retry=false
2024-12-02T21:12:55,028 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 7d4f3b9a7081,45513,1733173973117 expired; onlineServers=0
2024-12-02T21:12:55,028 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7d4f3b9a7081,45469,1733173972969' *****
2024-12-02T21:12:55,028 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-02T21:12:55,028 DEBUG [M:0;7d4f3b9a7081:45469 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ce48aa4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7d4f3b9a7081/172.17.0.2:0
2024-12-02T21:12:55,028 INFO [M:0;7d4f3b9a7081:45469 {}] regionserver.HRegionServer(1224): stopping server 7d4f3b9a7081,45469,1733173972969
2024-12-02T21:12:55,028 INFO [M:0;7d4f3b9a7081:45469 {}] regionserver.HRegionServer(1250): stopping server 7d4f3b9a7081,45469,1733173972969; all regions closed.
2024-12-02T21:12:55,028 DEBUG [M:0;7d4f3b9a7081:45469 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-02T21:12:55,028 DEBUG [M:0;7d4f3b9a7081:45469 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-02T21:12:55,029 DEBUG [M:0;7d4f3b9a7081:45469 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-02T21:12:55,029 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-02T21:12:55,029 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.small.0-1733173973384 {}] cleaner.HFileCleaner(306): Exit Thread[master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.small.0-1733173973384,5,FailOnTimeoutGroup]
2024-12-02T21:12:55,029 DEBUG [master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.large.0-1733173973384 {}] cleaner.HFileCleaner(306): Exit Thread[master/7d4f3b9a7081:0:becomeActiveMaster-HFileCleaner.large.0-1733173973384,5,FailOnTimeoutGroup]
2024-12-02T21:12:55,029 INFO [M:0;7d4f3b9a7081:45469 {}] hbase.ChoreService(370): Chore service for: master/7d4f3b9a7081:0 had [] on shutdown
2024-12-02T21:12:55,029 DEBUG [M:0;7d4f3b9a7081:45469 {}] master.HMaster(1733): Stopping service threads
2024-12-02T21:12:55,029 INFO [M:0;7d4f3b9a7081:45469 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-02T21:12:55,029 INFO [M:0;7d4f3b9a7081:45469 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-02T21:12:55,029 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-02T21:12:55,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-02T21:12:55,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-02T21:12:55,037 DEBUG [M:0;7d4f3b9a7081:45469 {}] zookeeper.ZKUtil(347): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-02T21:12:55,037 WARN [M:0;7d4f3b9a7081:45469 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-02T21:12:55,037 INFO [M:0;7d4f3b9a7081:45469 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-12-02T21:12:55,037 INFO [M:0;7d4f3b9a7081:45469 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-02T21:12:55,037 DEBUG [M:0;7d4f3b9a7081:45469 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-02T21:12:55,037 INFO [M:0;7d4f3b9a7081:45469 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-02T21:12:55,037 DEBUG [M:0;7d4f3b9a7081:45469 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-02T21:12:55,037 DEBUG [M:0;7d4f3b9a7081:45469 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-02T21:12:55,037 DEBUG [M:0;7d4f3b9a7081:45469 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-02T21:12:55,037 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-02T21:12:55,037 INFO [M:0;7d4f3b9a7081:45469 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=25.32 KB heapSize=32.31 KB
2024-12-02T21:12:55,057 DEBUG [M:0;7d4f3b9a7081:45469 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d68b2306973a4562b09a88c733ff82f1 is 82, key is hbase:meta,,1/info:regioninfo/1733173974089/Put/seqid=0
2024-12-02T21:12:55,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38953 is added to blk_1073741842_1018 (size=5672)
2024-12-02T21:12:55,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44483 is added to blk_1073741842_1018 (size=5672)
2024-12-02T21:12:55,061 INFO [M:0;7d4f3b9a7081:45469 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d68b2306973a4562b09a88c733ff82f1
2024-12-02T21:12:55,078 DEBUG [M:0;7d4f3b9a7081:45469 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/90045946900f4303ba2665b66bab7f57 is 696, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733173974547/Put/seqid=0
2024-12-02T21:12:55,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44483 is added to blk_1073741843_1019 (size=6626)
2024-12-02T21:12:55,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38953 is added to blk_1073741843_1019 (size=6626)
2024-12-02T21:12:55,082 INFO [M:0;7d4f3b9a7081:45469 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.72 KB at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/90045946900f4303ba2665b66bab7f57
2024-12-02T21:12:55,098 DEBUG [M:0;7d4f3b9a7081:45469 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/14cb6e7441c04929ad2e13712e576c99 is 69, key is 7d4f3b9a7081,45513,1733173973117/rs:state/1733173973466/Put/seqid=0
2024-12-02T21:12:55,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44483 is added to blk_1073741844_1020 (size=5156)
2024-12-02T21:12:55,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38953 is added to blk_1073741844_1020 (size=5156)
2024-12-02T21:12:55,103 INFO [M:0;7d4f3b9a7081:45469 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/14cb6e7441c04929ad2e13712e576c99
2024-12-02T21:12:55,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45513-0x101992db24c0001, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-02T21:12:55,120 INFO [RS:0;7d4f3b9a7081:45513 {}] regionserver.HRegionServer(1307): Exiting; stopping=7d4f3b9a7081,45513,1733173973117; zookeeper connection closed.
2024-12-02T21:12:55,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45513-0x101992db24c0001, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-02T21:12:55,120 DEBUG [M:0;7d4f3b9a7081:45469 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b71dbf41053b4251b116c5c043983c39 is 52, key is load_balancer_on/state:d/1733173974751/Put/seqid=0
2024-12-02T21:12:55,120 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@16f96f7d {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@16f96f7d
2024-12-02T21:12:55,120 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-02T21:12:55,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38953 is added to blk_1073741845_1021 (size=5056)
2024-12-02T21:12:55,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44483 is added to blk_1073741845_1021 (size=5056)
2024-12-02T21:12:55,125 INFO [M:0;7d4f3b9a7081:45469 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b71dbf41053b4251b116c5c043983c39
2024-12-02T21:12:55,128 DEBUG [M:0;7d4f3b9a7081:45469 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d68b2306973a4562b09a88c733ff82f1 as hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d68b2306973a4562b09a88c733ff82f1
2024-12-02T21:12:55,132 INFO [M:0;7d4f3b9a7081:45469 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d68b2306973a4562b09a88c733ff82f1, entries=8, sequenceid=70, filesize=5.5 K
2024-12-02T21:12:55,133 DEBUG [M:0;7d4f3b9a7081:45469 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/90045946900f4303ba2665b66bab7f57 as hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/90045946900f4303ba2665b66bab7f57
2024-12-02T21:12:55,138 INFO [M:0;7d4f3b9a7081:45469 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/90045946900f4303ba2665b66bab7f57, entries=8, sequenceid=70, filesize=6.5 K
2024-12-02T21:12:55,139 DEBUG [M:0;7d4f3b9a7081:45469 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/14cb6e7441c04929ad2e13712e576c99 as hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/14cb6e7441c04929ad2e13712e576c99
2024-12-02T21:12:55,144 INFO [M:0;7d4f3b9a7081:45469 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/14cb6e7441c04929ad2e13712e576c99, entries=1, sequenceid=70, filesize=5.0 K
2024-12-02T21:12:55,145 DEBUG [M:0;7d4f3b9a7081:45469 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b71dbf41053b4251b116c5c043983c39 as hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b71dbf41053b4251b116c5c043983c39
2024-12-02T21:12:55,149 INFO [M:0;7d4f3b9a7081:45469 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41797/user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b71dbf41053b4251b116c5c043983c39, entries=1, sequenceid=70, filesize=4.9 K
2024-12-02T21:12:55,150 INFO [M:0;7d4f3b9a7081:45469 {}] regionserver.HRegion(3040): Finished flush of dataSize ~25.32 KB/25929, heapSize ~32.25 KB/33024, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 113ms, sequenceid=70, compaction requested=false
2024-12-02T21:12:55,152 INFO [M:0;7d4f3b9a7081:45469 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-02T21:12:55,152 DEBUG [M:0;7d4f3b9a7081:45469 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-02T21:12:55,152 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/f799a6d8-88fa-eef1-4b53-8273c9bb93a2/MasterData/WALs/7d4f3b9a7081,45469,1733173972969
2024-12-02T21:12:55,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38953 is added to blk_1073741830_1006 (size=31030)
2024-12-02T21:12:55,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44483 is added to blk_1073741830_1006 (size=31030)
2024-12-02T21:12:55,154 INFO [M:0;7d4f3b9a7081:45469 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-02T21:12:55,154 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-02T21:12:55,154 INFO [M:0;7d4f3b9a7081:45469 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:45469
2024-12-02T21:12:55,161 DEBUG [M:0;7d4f3b9a7081:45469 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/7d4f3b9a7081,45469,1733173972969 already deleted, retry=false
2024-12-02T21:12:55,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-02T21:12:55,270 INFO [M:0;7d4f3b9a7081:45469 {}] regionserver.HRegionServer(1307): Exiting; stopping=7d4f3b9a7081,45469,1733173972969; zookeeper connection closed.
2024-12-02T21:12:55,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45469-0x101992db24c0000, quorum=127.0.0.1:56969, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-02T21:12:55,272 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@654055a1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-02T21:12:55,272 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@30fa4dee{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-02T21:12:55,272 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-02T21:12:55,273 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1aef51eb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-02T21:12:55,273 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b72c457{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/hadoop.log.dir/,STOPPED}
2024-12-02T21:12:55,274 WARN [BP-1842846650-172.17.0.2-1733173971512 heartbeating to localhost/127.0.0.1:41797 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-02T21:12:55,274 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-02T21:12:55,274 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-02T21:12:55,274 WARN [BP-1842846650-172.17.0.2-1733173971512 heartbeating to localhost/127.0.0.1:41797 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1842846650-172.17.0.2-1733173971512 (Datanode Uuid 9397b931-0a97-4534-b59b-d0524d24262a) service to localhost/127.0.0.1:41797
2024-12-02T21:12:55,274 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/cluster_d1106b91-8d14-61f9-5ce5-728fd048ffe2/dfs/data/data3/current/BP-1842846650-172.17.0.2-1733173971512 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-02T21:12:55,274 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/cluster_d1106b91-8d14-61f9-5ce5-728fd048ffe2/dfs/data/data4/current/BP-1842846650-172.17.0.2-1733173971512 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-02T21:12:55,275 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-02T21:12:55,276 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@53367781{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-02T21:12:55,276 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@9e26b70{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-02T21:12:55,276 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-02T21:12:55,277 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@39941173{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-02T21:12:55,277 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68c8979e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/hadoop.log.dir/,STOPPED}
2024-12-02T21:12:55,278 WARN [BP-1842846650-172.17.0.2-1733173971512 heartbeating to localhost/127.0.0.1:41797 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-02T21:12:55,278 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-02T21:12:55,278 WARN [BP-1842846650-172.17.0.2-1733173971512 heartbeating to localhost/127.0.0.1:41797 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1842846650-172.17.0.2-1733173971512 (Datanode Uuid 78737652-ad5c-4abe-aaab-05304046d616) service to localhost/127.0.0.1:41797
2024-12-02T21:12:55,278 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-02T21:12:55,278 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/cluster_d1106b91-8d14-61f9-5ce5-728fd048ffe2/dfs/data/data1/current/BP-1842846650-172.17.0.2-1733173971512 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-02T21:12:55,278 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/cluster_d1106b91-8d14-61f9-5ce5-728fd048ffe2/dfs/data/data2/current/BP-1842846650-172.17.0.2-1733173971512 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-02T21:12:55,279 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-02T21:12:55,285 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1b720e42{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-02T21:12:55,286 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@542dfbed{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-02T21:12:55,286 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-02T21:12:55,286 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59c5fb4b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-02T21:12:55,286 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@33224ca9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/585d9600-f30d-a03e-c8cb-89565478f6d7/hadoop.log.dir/,STOPPED}
2024-12-02T21:12:55,291 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-02T21:12:55,304 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
2024-12-02T21:12:55,311 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=146 (was 126) - Thread LEAK? -, OpenFileDescriptor=517 (was 487) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=70 (was 68) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6735 (was 6743)